Dataset schema (column name, type, and value range as reported by the dataset viewer):

| column | type | range |
| --- | --- | --- |
| entry_point | string | 1–65 chars |
| original_triton_python_code | string | 208–619k chars |
| optimised_triton_code | string | 1.15k–275k chars |
| repo_name | string | 7–115 chars |
| module_name | string | 1–65 chars |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | list | 1–6 items |
| stars | int64 | 0–19.8k |
| sha | string | 40 chars (fixed) |
| repo_link | string | 72–180 chars |
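Each row pairs an eager PyTorch module (`original_triton_python_code`) with the TorchInductor/Triton lowering generated for it (`optimised_triton_code`), plus provenance metadata. A minimal consumption sketch, assuming the dataset is hosted on the Hugging Face Hub; the dataset id below is a placeholder, not given by this card:

```python
# Sketch only: "<org>/<dataset>" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset>", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])

# Both code columns are complete Python sources; writing them to disk
# lets you import and compare the eager and compiled variants.
with open("original.py", "w") as f:
    f.write(row["original_triton_python_code"])
with open("optimised.py", "w") as f:
    f.write(row["optimised_triton_code"])
```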
entry_point: NormedConv2d

original_triton_python_code:

```python
import torch
import torch.nn as nn


class NormedConv2d(nn.Conv2d):
    """Normalized Conv2d Layer.

    Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to keep numerical
            stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Normalize over kernel.
            Default to False.
    """

    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
                 norm_over_kernel=False, **kwargs):
        super(NormedConv2d, self).__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.norm_over_kernel = norm_over_kernel
        self.eps = eps

    def forward(self, x):
        if not self.norm_over_kernel:
            weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True)
                                     .pow(self.power) + self.eps)
        else:
            weight_ = self.weight / (self.weight.view(self.weight.size(0), -1)
                                     .norm(dim=1, keepdim=True)
                                     .pow(self.power)[..., None, None] + self.eps)
        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x_ * self.tempearture
        if hasattr(self, 'conv2d_forward'):
            x_ = self.conv2d_forward(x_, weight_)
        elif torch.__version__ >= '1.8':
            x_ = self._conv_forward(x_, weight_, self.bias)
        else:
            x_ = self._conv_forward(x_, weight_)
        return x_


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1(in_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tmp16 = 20.0
    tmp17 = tmp15 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_linalg_vector_norm_pow_0[grid(256)](
            primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1[grid(256)](
            primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_2[grid(16)](buf3, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf3, primals_1, buf0, buf1


class NormedConv2dNew(nn.Conv2d):
    """Normalized Conv2d Layer.

    Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to keep numerical
            stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Normalize over kernel.
            Default to False.
    """

    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
                 norm_over_kernel=False, **kwargs):
        super(NormedConv2dNew, self).__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.norm_over_kernel = norm_over_kernel
        self.eps = eps

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```
repo_name: FMsunyh/mmdetection
module_name: NormedConv2d
synthetic: false
uuid: 13672
licenses: ["Apache-2.0"]
stars: 240
sha: d3683eb06d1041aa3d55f35ad81d8c37718a4c2d
repo_link: https://github.com/FMsunyh/mmdetection/tree/d3683eb06d1041aa3d55f35ad81d8c37718a4c2d
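Each record pairs the eager module with a `*New` class whose `forward` dispatches into the generated `call`. A quick equivalence sketch for this row (assumes a CUDA device, since `call` allocates CUDA buffers; the same pattern applies to the rows below):

```python
# Sketch, not part of the dataset row. Requires a CUDA device.
import torch

torch.manual_seed(0)
init_args, init_kwargs = get_init_inputs()
ref = NormedConv2d(*init_args, **init_kwargs).cuda()
opt = NormedConv2dNew(*init_args, **init_kwargs).cuda()
opt.load_state_dict(ref.state_dict())  # align randomly initialised weights
x = get_inputs()[0].cuda()
torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-4)
```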
entry_point: TemperatureTanh

original_triton_python_code:

```python
import torch
from torch import Tensor
from torch.functional import Tensor
from torch import nn as nn


class TemperatureTanh(nn.Module):

    def __init__(self, temperature: 'float' = 1.0) -> None:
        """The hyperbolic tangent with an optional temperature."""
        super().__init__()
        assert temperature != 0.0, 'temperature must be nonzero.'
        self._T = temperature
        self.tanh = torch.nn.Tanh()

    def forward(self, x: 'Tensor') -> Tensor:
        return self.tanh(x / self._T)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```
optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TemperatureTanhNew(nn.Module):

    def __init__(self, temperature: 'float' = 1.0) -> None:
        """The hyperbolic tangent with an optional temperature."""
        super().__init__()
        assert temperature != 0.0, 'temperature must be nonzero.'
        self._T = temperature
        self.tanh = torch.nn.Tanh()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: Felix2048/VLN-CE
module_name: TemperatureTanh
synthetic: false
uuid: 13673
licenses: ["MIT"]
stars: 106
sha: 4ea21f2af0d869ae65dd6677a53e788233f93761
repo_link: https://github.com/Felix2048/VLN-CE/tree/4ea21f2af0d869ae65dd6677a53e788233f93761
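Because `self._T` is a plain Python float, the trace bakes it in as a constant: the kernel above multiplies by `1.0` (the reciprocal of `T = 1.0`) rather than dividing. A trivial check of that rewrite (sketch, not dataset code):

```python
# x / T is traced as x * (1 / T); with T = 1.0 the kernel's constant is 1.0.
import torch

x = torch.randn(4, 4, 4, 4)
T = 1.0
assert torch.allclose(torch.tanh(x / T), torch.tanh(x * (1.0 / T)))
```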
entry_point: Net

original_triton_python_code:

```python
import torch
import torch.nn as tnn


class Net(tnn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = tnn.Conv2d(3, 6, 5)
        self.pool = tnn.MaxPool2d(2, 2)
        self.conv2 = tnn.Conv2d(6, 16, 5)
        self.fc1 = tnn.Linear(16 * 5 * 5, 120)
        self.fc2 = tnn.Linear(120, 84)
        self.fc3 = tnn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(self.conv1(x))
        x = self.pool(self.conv2(x))
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as tnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 18816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 6
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 4704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x3 = xindex // 14
    x2 = xindex // 1176
    x4 = xindex % 1176
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (120, 400), (400, 1))
    assert_size_stride(primals_7, (120,), (1,))
    assert_size_stride(primals_8, (84, 120), (120, 1))
    assert_size_stride(primals_9, (84,), (1,))
    assert_size_stride(primals_10, (10, 84), (84, 1))
    assert_size_stride(primals_11, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(18816)](buf1, primals_2, 18816,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
            buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_2[grid(6400)](buf5, primals_5, 6400,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
            buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 400),
            (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0),
            alpha=1, beta=1, out=buf8)
        del primals_7
        buf9 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
            (120, 84), (1, 120), 0), alpha=1, beta=1, out=buf9)
        del primals_9
        buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(primals_10,
            (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf10)
        del primals_11
    return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
        buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf8, buf9,
        primals_10, primals_8, primals_6)


class NetNew(tnn.Module):

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = tnn.Conv2d(3, 6, 5)
        self.pool = tnn.MaxPool2d(2, 2)
        self.conv2 = tnn.Conv2d(6, 16, 5)
        self.fc1 = tnn.Linear(16 * 5 * 5, 120)
        self.fc2 = tnn.Linear(120, 84)
        self.fc3 = tnn.Linear(84, 10)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_10 = self.fc3.weight
        primals_11 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
```
repo_name: Exusial/jittor
module_name: Net
synthetic: false
uuid: 13674
licenses: ["Apache-2.0"]
stars: 2571
sha: eca21d5bba5098bce4f492fa44908677b6e76588
repo_link: https://github.com/Exusial/jittor/tree/eca21d5bba5098bce4f492fa44908677b6e76588
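The intermediate shapes asserted in `call` follow from LeNet-style arithmetic on the 32x32 input: each 5x5 valid convolution shrinks the spatial size by 4, and each 2x2 pool halves it. A CPU-side sketch:

```python
# Sketch: (4, 3, 32, 32) -conv1-> (4, 6, 28, 28) -pool-> (4, 6, 14, 14)
# -conv2-> (4, 16, 10, 10) -pool-> (4, 16, 5, 5) -view-> (4, 400) -> (4, 10)
import torch

net = Net()
assert net(torch.rand(4, 3, 32, 32)).shape == (4, 10)
```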
entry_point: Attention

original_triton_python_code:

```python
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
            dot_products.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(self.dropout(
            F.softmax(dot_products / self.scale, dim=-1)), value)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
            torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_key': 4, 'dropout_ratio': 0.5, 'causal': 4}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tl.store(out_ptr0 + x4, tmp0, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
        del arg1_1
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf1
        triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
        del arg2_1
        del buf3
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class AttentionNew(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
```
repo_name: FGDBTKD/decaNLP
module_name: Attention
synthetic: false
uuid: 13675
licenses: ["BSD-3-Clause"]
stars: 2361
sha: ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
repo_link: https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
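Note that `Attention.forward` references `INF`, which this snippet never defines (the source repo declares it at module level elsewhere); the snippet still traces cleanly because the 4-D inputs skip the `query.dim() == 3` causal branch and `padding` is `None`. To make the eager code self-contained, a stub suffices:

```python
# Hypothetical stub, not in the dataset row: any large finite constant
# works as the masking value the causal/padding branches subtract.
INF = 1e10
```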
entry_point: ChannelNorm

original_triton_python_code:

```python
import torch
import torch.nn as nn


class ChannelNorm(nn.Module):

    def __init__(self, numFeatures, epsilon=1e-05, affine=True):
        super(ChannelNorm, self).__init__()
        if affine:
            self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
            self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
        else:
            self.weight = None
            self.bias = None
        self.epsilon = epsilon
        self.p = 0
        self.affine = affine
        self.reset_parameters()

    def reset_parameters(self):
        if self.affine:
            torch.nn.init.ones_(self.weight)
            torch.nn.init.zeros_(self.bias)

    def forward(self, x):
        cumMean = x.mean(dim=1, keepdim=True)
        cumVar = x.var(dim=1, keepdim=True)
        x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon)
        if self.weight is not None:
            x = x * self.weight + self.bias
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'numFeatures': 4}]
```
optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x3 = xindex // 64
    x5 = xindex % 16
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tmp11 = tmp1 - tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp2 - tmp9
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp16 = tmp4 - tmp9
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp9
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = 3.0
    tmp23 = tmp21 / tmp22
    tmp24 = 1e-05
    tmp25 = tmp23 + tmp24
    tmp26 = libdevice.rsqrt(tmp25)
    tmp27 = tmp10 * tmp26
    tmp29 = tmp27 * tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x4, tmp31, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_mul_rsqrt_sub_var_0[grid(256)](primals_1,
            primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class ChannelNormNew(nn.Module):

    def __init__(self, numFeatures, epsilon=1e-05, affine=True):
        super(ChannelNormNew, self).__init__()
        if affine:
            self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
            self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
        else:
            self.weight = None
            self.bias = None
        self.epsilon = epsilon
        self.p = 0
        self.affine = affine
        self.reset_parameters()

    def reset_parameters(self):
        if self.affine:
            torch.nn.init.ones_(self.weight)
            torch.nn.init.zeros_(self.bias)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```
repo_name: EyalSel/CPC_audio
module_name: ChannelNorm
synthetic: false
uuid: 13676
licenses: ["MIT"]
stars: 260
sha: b98a1bdf1fe9ea219816db7a6c28115d404a3510
repo_link: https://github.com/EyalSel/CPC_audio/tree/b98a1bdf1fe9ea219816db7a6c28115d404a3510
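The fused kernel divides the summed squared deviations by `3.0` (= numFeatures − 1), which is the unbiased estimator `torch.var` uses by default, so the lowering matches the eager semantics exactly. A quick check (sketch, not dataset code):

```python
# Sketch: unbiased channel variance over 4 channels divides by n - 1 = 3.
import torch

x = torch.rand(4, 4, 4, 4)
v = x.var(dim=1, keepdim=True)
manual = ((x - x.mean(dim=1, keepdim=True)) ** 2).sum(dim=1, keepdim=True) / 3.0
assert torch.allclose(v, manual)
```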
entry_point: Conv

original_triton_python_code:

```python
import torch
import torch.nn as nn


class Conv(nn.Module):
    """
    Convolution Module
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=True, w_init='linear'):
        """
        :param in_channels: dimension of input
        :param out_channels: dimension of output
        :param kernel_size: size of kernel
        :param stride: size of stride
        :param padding: size of padding
        :param dilation: dilation rate
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=bias)

    def forward(self, x):
        x = x.contiguous().transpose(1, 2)
        x = self.conv(x)
        x = x.contiguous().transpose(1, 2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
        del buf0
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2,
        reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0))


class ConvNew(nn.Module):
    """
    Convolution Module
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=True, w_init='linear'):
        """
        :param in_channels: dimension of input
        :param out_channels: dimension of output
        :param kernel_size: size of kernel
        :param stride: size of stride
        :param padding: size of padding
        :param dilation: dilation rate
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(ConvNew, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=bias)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```
repo_name: FarisHijazi/klaam
module_name: Conv
synthetic: false
uuid: 13677
licenses: ["MIT"]
stars: 119
sha: 380b3cbf167bd4288cf5f3476e51f0939dff9e2c
repo_link: https://github.com/FarisHijazi/klaam/tree/380b3cbf167bd4288cf5f3476e51f0939dff9e2c
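Of the two `transpose(1, 2)` calls in the eager forward, only the input one costs a copy (the layout kernel above); the output transpose is expressed as `reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0)`, a pure stride swap. A sketch of that equivalence:

```python
# Sketch: transposing dims 1 and 2 of a contiguous (4, 4, 4) tensor swaps
# strides (16, 4, 1) -> (16, 1, 4) without touching the underlying storage.
import torch

y = torch.rand(4, 4, 4)
t = y.transpose(1, 2)
assert t.stride() == (16, 1, 4) and t.data_ptr() == y.data_ptr()
```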
entry_point: LinearFeedforward

original_triton_python_code:

```python
import torch
from torch import nn
import torch.utils.data


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Feedforward(nn.Module):

    def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
        super().__init__()
        if activation is not None:
            self.activation = getattr(torch, activation)
        else:
            self.activation = lambda x: x
        self.linear = Linear(d_in, d_out, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.activation(self.linear(self.dropout(x)))


class LinearFeedforward(nn.Module):

    def __init__(self, d_in, d_hid, d_out, activation='relu'):
        super().__init__()
        self.feedforward = Feedforward(d_in, d_hid, activation=activation)
        self.linear = Linear(d_hid, d_out)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        return self.dropout(self.linear(self.feedforward(x)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_in': 4, 'd_hid': 4, 'd_out': 4}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Feedforward(nn.Module):

    def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
        super().__init__()
        if activation is not None:
            self.activation = getattr(torch, activation)
        else:
            self.activation = lambda x: x
        self.linear = Linear(d_in, d_out, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.activation(self.linear(self.dropout(x)))


class LinearFeedforwardNew(nn.Module):

    def __init__(self, d_in, d_hid, d_out, activation='relu'):
        super().__init__()
        self.feedforward = Feedforward(d_in, d_hid, activation=activation)
        self.linear = Linear(d_hid, d_out)
        self.dropout = nn.Dropout(0.2)

    def forward(self, input_0):
        primals_2 = self.feedforward.linear.weight
        primals_3 = self.feedforward.linear.bias
        primals_4 = self.linear.weight
        primals_5 = self.linear.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
```
repo_name: FGDBTKD/decaNLP
module_name: LinearFeedforward
synthetic: false
uuid: 13678
licenses: ["BSD-3-Clause"]
stars: 2361
sha: ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
repo_link: https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
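Both `nn.Dropout` layers are absent from the generated graph, which is consistent with the module being traced with dropout inactive (eval mode), where it is the identity. A sketch of the comparison setting:

```python
# Sketch: in eval mode dropout is a no-op, so the eager module becomes
# deterministic and directly comparable with the generated code above.
import torch

m = LinearFeedforward(4, 4, 4).eval()
x = torch.rand(4, 4, 4, 4)
assert torch.equal(m(x), m(x))
```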
entry_point: EncoderImagePrecomp

original_triton_python_code:

```python
import torch
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init


def l2norm(X):
    """L2-normalize columns of X
    """
    norm = torch.pow(X, 2).sum(dim=1).sqrt().view(X.size(0), -1)
    X = torch.div(X, norm.expand_as(X))
    return X


class EncoderImagePrecomp(nn.Module):

    def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
        super(EncoderImagePrecomp, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def forward(self, images):
        """Extract image feature vectors."""
        features = self.fc(images)
        if not self.no_imgnorm:
            features = l2norm(features)
        if self.use_abs:
            features = torch.abs(features)
        return features

    def load_state_dict(self, state_dict):
        """Copies parameters. Overwrites the default implementation to
        accept a state_dict from the full model.
        """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in list(state_dict.items()):
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecomp, self).load_state_dict(new_state)

    def __call__(self, images):
        return self.forward(images)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'img_dim': 4, 'embed_size': 4}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tmp0 / tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
            primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return buf1, primals_3, buf0


def l2norm(X):
    """L2-normalize columns of X
    """
    norm = torch.pow(X, 2).sum(dim=1).sqrt().view(X.size(0), -1)
    X = torch.div(X, norm.expand_as(X))
    return X


class EncoderImagePrecompNew(nn.Module):

    def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
        super(EncoderImagePrecompNew, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def load_state_dict(self, state_dict):
        """Copies parameters. Overwrites the default implementation to
        accept a state_dict from the full model.
        """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in list(state_dict.items()):
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecompNew, self).load_state_dict(new_state)

    def __call__(self, images):
        return self.forward(images)

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```
repo_name: ExplorerFreda/VSE-C
module_name: EncoderImagePrecomp
synthetic: false
uuid: 13679
licenses: ["MIT"]
stars: 61
sha: 52d7742adfe017eacd74f36a5953ea2ace9f5fce
repo_link: https://github.com/ExplorerFreda/VSE-C/tree/52d7742adfe017eacd74f36a5953ea2ace9f5fce
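`l2norm` divides by the norm with no epsilon, and the fused kernel mirrors that exactly (`tmp0 / tmp12` with no guard), so an all-zero row yields NaN in both versions; the compilation preserves behaviour rather than fixing it. A sketch:

```python
# Sketch: the unguarded division means a zero row produces NaN in the eager
# l2norm, and the generated kernel reproduces the same behaviour.
import torch

assert torch.isnan(l2norm(torch.zeros(1, 4))).all()
```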
entry_point: MultiHead

original_triton_python_code:

```python
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
            dot_products.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(self.dropout(
            F.softmax(dot_products / self.scale, dim=-1)), value)


class MultiHead(nn.Module):

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value, padding=None):
        query, key, value = self.wq(query), self.wk(key), self.wv(value)
        query, key, value = (x.chunk(self.n_heads, -1)
                             for x in (query, key, value))
        return torch.cat([self.attention(q, k, v, padding=padding)
                          for q, k, v in zip(query, key, value)], -1)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
            torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4,
                 'dropout_ratio': 0.5}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
from torch.nn import functional as F
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 4, tl.int64)
    tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
        del primals_6
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1),
            (16, 4, 1), 0), out=buf6)
        buf7 = buf4
        del buf4
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf9 = buf7
        del buf7
        triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1),
            (16, 4, 1), 1), out=buf10)
        buf11 = buf8
        del buf8
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf13 = buf11
        del buf11
        triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1),
            (16, 4, 1), 2), out=buf14)
        buf15 = buf12
        del buf12
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf17 = buf15
        del buf15
        triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1),
            (16, 4, 1), 3), out=buf18)
        buf19 = buf16
        del buf16
        triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf10
        del buf14
        del buf18
        del buf6
    return (buf19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
        buf5, buf9, buf13, buf17,
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
            dot_products.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(self.dropout(
            F.softmax(dot_products / self.scale, dim=-1)), value)


class MultiHeadNew(nn.Module):

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.wq.weight
        primals_4 = self.wk.weight
        primals_6 = self.wv.weight
        primals_1 = input_0
        primals_3 = input_1
        primals_5 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
```
repo_name: FGDBTKD/decaNLP
module_name: MultiHead
synthetic: false
uuid: 13680
licenses: ["BSD-3-Clause"]
stars: 2361
sha: ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
repo_link: https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
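With `d_key = 4` and `n_heads = 4`, each head is a width-1 slice of the projected tensors, which is why `call` unrolls into four bmm/softmax/bmm sequences whose `reinterpret_tensor` offsets 0-3 pick out successive columns. The chunking it unrolls (sketch):

```python
# Sketch: chunk(4, -1) on a (4, 4, 4) projection yields four (4, 4, 1)
# heads; each unrolled bmm sequence above handles one of them.
import torch

q = torch.rand(4, 4, 4)
heads = q.chunk(4, -1)
assert len(heads) == 4 and heads[0].shape == (4, 4, 1)
```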
entry_point: NormLoss

original_triton_python_code:

```python
import torch


class NormLoss(torch.nn.Module):
    """
    Norm penalty on function parameters:
    p - dimension of norm
    """

    def __init__(self, p):
        super(NormLoss, self).__init__()
        self.p = p

    def forward(self, beta):
        return torch.norm(beta, p=self.p)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'p': 4}]
```
optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tmp0 * tmp0
    tmp2 = tmp1 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = 0.25
    tmp7 = libdevice.pow(tmp5, tmp6)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_0[grid(1)](buf1, arg0_1, 1, 256,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class NormLossNew(torch.nn.Module):
    """
    Norm penalty on function parameters:
    p - dimension of norm
    """

    def __init__(self, p):
        super(NormLossNew, self).__init__()
        self.p = p

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: Filco306/TopologyLayer
module_name: NormLoss
synthetic: false
uuid: 13681
licenses: ["MIT"]
stars: 250
sha: 1d6261017a80cff0ee06bb896ded40777b0989b4
repo_link: https://github.com/Filco306/TopologyLayer/tree/1d6261017a80cff0ee06bb896ded40777b0989b4
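The reduction kernel squares the input twice (i.e. computes `x**4`), sums over all 256 elements, and raises the sum to `0.25`: exactly the p=4 vector norm the eager module computes. A quick check (sketch, not dataset code):

```python
# Sketch: torch.norm(beta, p=4) over all elements == (sum(beta**4)) ** 0.25,
# which is what the kernel's square-square / sum / pow(0.25) pipeline does.
import torch

beta = torch.rand(4, 4, 4, 4)
assert torch.allclose(torch.norm(beta, p=4), beta.pow(4).sum().pow(0.25))
```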
entry_point: BoundaryDiscriminator

original_triton_python_code:

```python
import torch
import torch.nn as nn


class BoundaryDiscriminator(nn.Module):

    def __init__(self):
        super(BoundaryDiscriminator, self).__init__()
        filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(1, filter_num_list[0], kernel_size=4,
                               stride=2, padding=2, bias=False)
        self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        x = self.leakyrelu(self.conv1(x))
        x = self.leakyrelu(self.conv2(x))
        x = self.leakyrelu(self.conv3(x))
        x = self.leakyrelu(self.conv4(x))
        x = self.conv5(x)
        return x


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
```
optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 278784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr1 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 147968
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr1 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 82944
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr1 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp2, None)
    tl.store(out_ptr1 + x0, tmp5, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
    assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
    assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
        buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
            torch.bool)
        buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2,
            278784, XBLOCK=512, num_warps=8, num_stages=1)
        del buf0
        buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
        buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
            torch.bool)
        buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
            torch.float32)
        triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5,
            147968, XBLOCK=512, num_warps=8, num_stages=1)
        del buf3
        buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
        buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.bool)
        buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.float32)
        triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944,
            XBLOCK=512, num_warps=8, num_stages=1)
        del buf6
        buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
        buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.bool)
        buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.float32)
        triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11,
            51200, XBLOCK=256, num_warps=4, num_stages=1)
        del buf9
        buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
    return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5,
        primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11)


class BoundaryDiscriminatorNew(nn.Module):

    def __init__(self):
        super(BoundaryDiscriminatorNew, self).__init__()
        filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(1, filter_num_list[0], kernel_size=4,
                               stride=2, padding=2, bias=False)
        self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
                               kernel_size=4, stride=2, padding=2, bias=False)
        self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_3 = self.conv2.weight
        primals_4 = self.conv3.weight
        primals_5 = self.conv4.weight
        primals_6 = self.conv5.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
```
EmmaW8/BEAL
BoundaryDiscriminator
false
13682
[ "MIT" ]
95
945cad38a354605b8bca5bc01ae1b65848d605e1
https://github.com/EmmaW8/BEAL/tree/945cad38a354605b8bca5bc01ae1b65848d605e1
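A minimal smoke test for the eager BoundaryDiscriminator above — an illustrative sketch, not part of the original record, assuming the class definition is in scope and reusing the shapes from get_inputs(); the compiled call() path additionally requires a CUDA device. Each stride-2, padding-2, kernel-4 convolution maps spatial size n to floor((n + 4 - 4) / 2) + 1 = n // 2 + 1, so 64 -> 33 -> 17 -> 9 -> 5 -> 3, matching the (4, 1, 3, 3) shape asserted in the compiled graph.

import torch

model = BoundaryDiscriminator()
x = torch.rand(4, 1, 64, 64)          # same shape as get_inputs()
with torch.no_grad():
    out = model(x)
assert out.shape == (4, 1, 3, 3)      # 64 -> 33 -> 17 -> 9 -> 5 -> 3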
OutputDiscriminator
import torch import torch.nn as nn class OutputDiscriminator(nn.Module): def __init__(self): super(OutputDiscriminator, self).__init__() filter_num_list = [64, 128, 256, 512, 1] self.conv1 = nn.Conv2d(2, filter_num_list[0], kernel_size=4, stride =2, padding=2, bias=False) self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1], kernel_size=4, stride=2, padding=2, bias=False) self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2], kernel_size=4, stride=2, padding=2, bias=False) self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3], kernel_size=4, stride=2, padding=2, bias=False) self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4], kernel_size=4, stride=2, padding=2, bias=False) self.leakyrelu = nn.LeakyReLU(negative_slope=0.2) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.zero_() def forward(self, x): x = self.leakyrelu(self.conv1(x)) x = self.leakyrelu(self.conv2(x)) x = self.leakyrelu(self.conv3(x)) x = self.leakyrelu(self.conv4(x)) x = self.conv5(x) return x def get_inputs(): return [torch.rand([4, 2, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 278784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 147968 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, None) tl.store(out_ptr1 + x0, tmp5, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (64, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_2, (4, 2, 64, 64), (8192, 4096, 64, 1)) assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1)) buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool) buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2, 278784, XBLOCK=512, num_warps=8, num_stages=1) del buf0 buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1)) buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool) buf5 = empty_strided_cuda((4, 128, 
17, 17), (36992, 289, 17, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5, 147968, XBLOCK=512, num_warps=8, num_stages=1) del buf3 buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1)) buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool ) buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch. float32) triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944, XBLOCK=512, num_warps=8, num_stages=1) del buf6 buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1)) buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch .bool) buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch .float32) triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11, 51200, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1)) return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11) class OutputDiscriminatorNew(nn.Module): def __init__(self): super(OutputDiscriminatorNew, self).__init__() filter_num_list = [64, 128, 256, 512, 1] self.conv1 = nn.Conv2d(2, filter_num_list[0], kernel_size=4, stride =2, padding=2, bias=False) self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1], kernel_size=4, stride=2, padding=2, bias=False) self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2], kernel_size=4, stride=2, padding=2, bias=False) self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3], kernel_size=4, stride=2, padding=2, bias=False) self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4], kernel_size=4, stride=2, padding=2, bias=False) self.leakyrelu = nn.LeakyReLU(negative_slope=0.2) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.zero_() def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
EmmaW8/BEAL
OutputDiscriminator
false
13683
[ "MIT" ]
95
945cad38a354605b8bca5bc01ae1b65848d605e1
https://github.com/EmmaW8/BEAL/tree/945cad38a354605b8bca5bc01ae1b65848d605e1
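The same sketch for OutputDiscriminator, which differs from BoundaryDiscriminator only in taking 2 input channels (e.g. a two-class probability map) instead of 1; all downstream shapes are identical. Assumes the class above is in scope.

import torch

model = OutputDiscriminator()
x = torch.rand(4, 2, 64, 64)          # 2 input channels instead of 1
with torch.no_grad():
    out = model(x)
assert out.shape == (4, 1, 3, 3)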
PReLU
import torch import torch.nn as nn from torch.nn.parameter import Parameter import torch.utils.data import torch.cuda from torch.nn import Parameter import torch.optim class PReLU(nn.Module): def __init__(self): super(PReLU, self).__init__() self.alpha = Parameter(torch.tensor(0.25)) def forward(self, x): return nn.ReLU()(x) - self.alpha * nn.ReLU()(-x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.nn.parameter import Parameter import torch.utils.data import torch.cuda from torch.nn import Parameter import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_neg_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp5 = -tmp0 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp4 * tmp6 tmp8 = tmp2 - tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_neg_relu_sub_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class PReLUNew(nn.Module): def __init__(self): super(PReLUNew, self).__init__() self.alpha = Parameter(torch.tensor(0.25)) def forward(self, input_0): primals_2 = self.alpha primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Flamexmt/LMA
PReLU
false
13684
[ "MIT" ]
321
f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
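A quick equivalence check for the hand-rolled PReLU above — a sketch assuming the class is in scope. relu(x) - alpha * relu(-x) is algebraically the standard parametric ReLU max(0, x) + alpha * min(0, x), so at the initial alpha of 0.25 the module should match torch.nn.functional.prelu with a one-element weight.

import torch
import torch.nn.functional as F

m = PReLU()                            # alpha is initialised to 0.25
x = torch.randn(4, 4, 4, 4)
assert torch.allclose(m(x), F.prelu(x, torch.tensor([0.25])))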
ResidualBlock_noBN
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn import torch.nn.init as init def initialize_weights(net_l, scale=1): if not isinstance(net_l, list): net_l = [net_l] for net in net_l: for m in net.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, a=0, mode='fan_in') m.weight.data *= scale if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Linear): init.kaiming_normal_(m.weight, a=0, mode='fan_in') m.weight.data *= scale if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias.data, 0.0) class ResidualBlock_noBN(nn.Module): """Residual block w/o BN ---Conv-ReLU-Conv-+- |________________| """ def __init__(self, nf=64): super(ResidualBlock_noBN, self).__init__() self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) initialize_weights([self.conv1, self.conv2], 0.1) def forward(self, x): identity = x out = F.relu(self.conv1(x), inplace=True) out = self.conv2(out) return identity + out def get_inputs(): return [torch.rand([4, 64, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_3, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_add_convolution_1[grid(1048576)](buf3, primals_1, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_2, primals_4, buf1 def initialize_weights(net_l, scale=1): if not isinstance(net_l, list): net_l = [net_l] for net in net_l: for m in net.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, a=0, mode='fan_in') m.weight.data *= scale if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Linear): init.kaiming_normal_(m.weight, a=0, mode='fan_in') m.weight.data *= scale if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias.data, 0.0) class ResidualBlock_noBNNew(nn.Module): """Residual block w/o BN ---Conv-ReLU-Conv-+- |________________| """ def __init__(self, nf=64): super(ResidualBlock_noBNNew, self).__init__() self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) initialize_weights([self.conv1, self.conv2], 0.1) def forward(self, input_0): 
primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
EvgeneyZ/TMNet
ResidualBlock_noBN
false
13685
[ "Apache-2.0" ]
90
8a42754747c2fa575e9108c13b5018a884f46099
https://github.com/EvgeneyZ/TMNet/tree/8a42754747c2fa575e9108c13b5018a884f46099
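A shape and identity sanity check for ResidualBlock_noBN — a sketch assuming the class above is in scope. The 3x3 / stride-1 / padding-1 convolutions preserve H and W, and because the output is identity + conv2(relu(conv1(x))), zeroing conv2 collapses the block to the identity mapping.

import torch

block = ResidualBlock_noBN(nf=64)
x = torch.rand(4, 64, 64, 64)
assert block(x).shape == x.shape       # spatial size is preserved
torch.nn.init.zeros_(block.conv2.weight)
torch.nn.init.zeros_(block.conv2.bias)
assert torch.allclose(block(x), x)     # residual branch is now zero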
ResBlock
import torch import torch.nn as nn def get_same_padding(kernel_size, dilation): kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1) padding = (kernel_size - 1) // 2 return padding class ResBlock(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1): super(ResBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=get_same_padding(kernel_size, dilation), dilation=dilation) self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=1, padding=get_same_padding(kernel_size, dilation), dilation=dilation) self.relu = nn.ReLU(inplace=True) self.res_translate = None if not inplanes == planes or not stride == 1: self.res_translate = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride) def forward(self, x): residual = x out = self.relu(self.conv1(x)) out = self.conv2(out) if self.res_translate is not None: residual = self.res_translate(residual) out += residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_add_convolution_1[grid(256)](buf3, primals_5, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_2, primals_4, buf1 def get_same_padding(kernel_size, dilation): kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1) padding = (kernel_size - 1) // 2 return padding class ResBlockNew(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1): super(ResBlockNew, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=get_same_padding(kernel_size, dilation), dilation=dilation) self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=1, padding=get_same_padding(kernel_size, dilation), dilation=dilation) self.relu = nn.ReLU(inplace=True) self.res_translate = None if not inplanes == planes or not stride == 1: self.res_translate = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5]) return output[0]
Flemingjp/CDVD-TSP
ResBlock
false
13686
[ "MIT" ]
232
a2621476deb9386b1bc02570706f490d582930c8
https://github.com/Flemingjp/CDVD-TSP/tree/a2621476deb9386b1bc02570706f490d582930c8
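A sketch exercising both branches of ResBlock above (assumes the class and get_same_padding are in scope): with matching planes and stride 1 the residual passes through untouched, while changing planes or stride routes it through the 1x1 res_translate convolution. get_same_padding(3, 2) dilates the kernel to an effective size of 5, hence padding 2.

import torch

assert get_same_padding(3, 1) == 1 and get_same_padding(3, 2) == 2

same = ResBlock(4, 4)                  # res_translate stays None
down = ResBlock(4, 8, stride=2)        # residual goes through a 1x1 conv
x = torch.rand(4, 4, 4, 4)
assert same(x).shape == (4, 4, 4, 4)
assert down(x).shape == (4, 8, 2, 2)   # both paths halve H and W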
IIDIsotropicGaussianUVLoss
import math import torch import torch.utils.data import torch.nn.functional as F from torch import nn class IIDIsotropicGaussianUVLoss(nn.Module): """ Loss for the case of iid residuals with isotropic covariance: $Sigma_i = sigma_i^2 I$ The loss (negative log likelihood) is then: $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates difference between estimated and ground truth UV values For details, see: N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 """ def __init__(self, sigma_lower_bound: 'float'): super(IIDIsotropicGaussianUVLoss, self).__init__() self.sigma_lower_bound = sigma_lower_bound self.log2pi = math.log(2 * math.pi) def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u: 'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'): sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2 loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2) return loss.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sigma_lower_bound': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp13 = tl.load(in_ptr1 + r0, None) tmp14 = tl.load(in_ptr2 + r0, None) tmp17 = tl.load(in_ptr3 + r0, None) tmp18 = tl.load(in_ptr4 + r0, None) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 4.0 tmp7 = tmp5 + tmp6 tmp8 = tl_math.log(tmp7) tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = 1.8378770664093453 tmp12 = tmp10 + tmp11 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp16 + tmp20 tmp22 = tmp21 / tmp7 tmp23 = tmp12 + tmp22 tmp24 = 0.5 tmp25 = tmp23 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1 , arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return buf0, class IIDIsotropicGaussianUVLossNew(nn.Module): """ Loss for the case of iid residuals with isotropic covariance: $Sigma_i = sigma_i^2 I$ The loss (negative log likelihood) is then: $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates difference between estimated and ground truth UV values For details, see: N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 """ def __init__(self, sigma_lower_bound: 'float'): super(IIDIsotropicGaussianUVLossNew, self).__init__() self.sigma_lower_bound = sigma_lower_bound self.log2pi = math.log(2 * math.pi) def forward(self, input_0, input_1, input_2, input_3, input_4): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
FluteXu/DW-Research
IIDIsotropicGaussianUVLoss
false
13687
[ "Apache-2.0" ]
780
6b559d2d1d440c07e5936a65cd74a3bc657962dc
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
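A sketch restating the isotropic negative log likelihood by hand, to confirm that the module applies the variance floor via softplus and reduces with a plain sum over all elements. Assumes the class above is in scope; tensor shapes follow get_inputs().

import math
import torch
import torch.nn.functional as F

loss_fn = IIDIsotropicGaussianUVLoss(sigma_lower_bound=4)
u, v, sigma_u, tu, tv = (torch.rand(4, 4, 4, 4) for _ in range(5))
loss = loss_fn(u, v, sigma_u, tu, tv)

sigma2 = F.softplus(sigma_u) + 4                       # floored variance
delta2 = (u - tu) ** 2 + (v - tv) ** 2
ref = 0.5 * (math.log(2 * math.pi) + 2 * torch.log(sigma2) + delta2 / sigma2)
assert loss.dim() == 0 and torch.allclose(loss, ref.sum())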
Swish
import torch import torch.nn as nn from torch.nn.parameter import Parameter import torch.utils.data import torch.cuda from torch.nn import Parameter import torch.optim class Swish(nn.Module): def __init__(self, dim): super(Swish, self).__init__() self.betas = Parameter(torch.ones(dim)) self.dim = dim def forward(self, x): pre_size = x.size() return x * nn.Sigmoid()(self.betas.view(-1, self.dim) * x.view(-1, self.dim)).view(pre_size) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.parameter import Parameter import torch.utils.data import torch.cuda from torch.nn import Parameter import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp0 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2 class SwishNew(nn.Module): def __init__(self, dim): super(SwishNew, self).__init__() self.betas = Parameter(torch.ones(dim)) self.dim = dim def forward(self, input_0): primals_2 = self.betas primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Flamexmt/LMA
Swish
false
13688
[ "MIT" ]
321
f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
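Since Swish initialises its betas to ones, the freshly constructed module should agree with the plain swish/SiLU activation x * sigmoid(x). A sketch assuming the class above is in scope; dim must match the trailing dimension of the input, as the view(-1, dim) reshape requires.

import torch

swish = Swish(dim=4)                   # betas start at 1.0
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(swish(x), x * torch.sigmoid(x))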
Hsigmoid
import torch import torch.utils.data import torch.nn.functional as F from torch import nn class Hsigmoid(nn.Module): def __init__(self, inplace=True): super(Hsigmoid, self).__init__() self.inplace = inplace def forward(self, x): return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HsigmoidNew(nn.Module): def __init__(self, inplace=True): super(HsigmoidNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
FluteXu/DW-Research
Hsigmoid
false
13689
[ "Apache-2.0" ]
780
6b559d2d1d440c07e5936a65cd74a3bc657962dc
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
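Hard sigmoid is piecewise linear: relu6(x + 3) / 6 is 0 below x = -3, 1 above x = 3, and (x + 3) / 6 in between. A sketch, assuming the class above is in scope, pinning down those three regimes.

import torch

hsig = Hsigmoid()
x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
expected = torch.tensor([0.0, 0.0, 0.5, 1.0, 1.0])
assert torch.allclose(hsig(x), expected)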
GlobalAttention
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class GlobalAttention(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. All models compute the output as :math:`c = sum_{j=1}^{SeqLength} a_j H_j` where :math:`a_j` is the softmax of a score function. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a h_j)` Args: attn_size (int): dimensionality of query and key attn_type (str): type of attention to use, options [dot,general,mlp] """ def __init__(self, query_size, attn_size, attn_type='dot'): super(GlobalAttention, self).__init__() self.query_size = query_size self.attn_size = attn_size self.attn_type = attn_type if self.attn_type == 'general': self.linear_in = nn.Linear(query_size, attn_size, bias=False) elif self.attn_type == 'mlp': self.linear_query = nn.Linear(query_size, attn_size, bias=True) self.attn_w = nn.Linear(attn_size, 1, bias=False) elif self.attn_type == 'dot': assert self.query_size == self.attn_size def forward(self, query, memory_keys, memory_values, memory_masks): """ Args: query (`FloatTensor`): (batch, query_size) memory_keys (`FloatTensor`): (batch, seq_len, attn_size) memory_values (`FloatTensor`): (batch, seq_len, attn_size) memory_masks (`LongTensor`): (batch, seq_len) Returns: attn_score: attention distributions (batch, seq_len) attn_memory: computed context vector, (batch, attn_size) """ batch_size, seq_len, attn_size = memory_keys.size() if self.attn_type == 'mlp': query_hidden = self.linear_query(query.unsqueeze(1)).expand( batch_size, seq_len, attn_size) attn_hidden = torch.tanh(query_hidden + memory_keys) attn_score = self.attn_w(attn_hidden) elif self.attn_type == 'dot': attn_score = torch.bmm(memory_keys, query.unsqueeze(2)) elif self.attn_type == 'general': query_hidden = self.linear_in(query) attn_score = torch.bmm(memory_keys, query_hidden.unsqueeze(2)) attn_score = attn_score.squeeze(2) if memory_masks is not None: attn_score = attn_score * memory_masks attn_score = attn_score.masked_fill(memory_masks == 0, -1e+18) attn_score = F.softmax(attn_score, dim=1) if memory_masks is not None: attn_score = attn_score.masked_fill(memory_masks == 0, 0) attn_memory = torch.sum(attn_score.unsqueeze(2) * memory_values, 1) return attn_score, attn_memory def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'query_size': 4, 'attn_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_eq_masked_fill_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tmp3 * tmp0 tmp5 = -9.999999843067494e+17 tmp6 = tl.where(tmp2, tmp5, tmp4) tmp8 = tmp7 == tmp1 tmp10 = tmp9 * tmp7 tmp11 = tl.where(tmp8, tmp5, tmp10) tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp14 = tmp13 == tmp1 tmp16 = tmp15 * tmp13 tmp17 = tl.where(tmp14, tmp5, tmp16) tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp20 = tmp19 == tmp1 tmp22 = tmp21 * tmp19 tmp23 = tl.where(tmp20, tmp5, tmp22) tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tl.store(out_ptr0 + x0, tmp24, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_eq_masked_fill_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tmp3 * tmp0 tmp5 = -9.999999843067494e+17 tmp6 = tl.where(tmp2, tmp5, tmp4) tmp8 = tmp6 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tmp12 = tl.where(tmp2, tmp1, tmp11) tl.store(in_out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_eq_masked_fill_mul_0[grid(4)](arg2_1, buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_eq_masked_fill_mul_1[grid(16)](buf3, arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg2_1 del buf1 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(16)](buf3, arg3_1, buf4, 16, XBLOCK =16, num_warps=1, num_stages=1) del arg3_1 return buf3, buf4 class GlobalAttentionNew(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. All models compute the output as :math:`c = sum_{j=1}^{SeqLength} a_j H_j` where :math:`a_j` is the softmax of a score function. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a h_j)` Args: attn_size (int): dimensionality of query and key attn_type (str): type of attention to use, options [dot,general,mlp] """ def __init__(self, query_size, attn_size, attn_type='dot'): super(GlobalAttentionNew, self).__init__() self.query_size = query_size self.attn_size = attn_size self.attn_type = attn_type if self.attn_type == 'general': self.linear_in = nn.Linear(query_size, attn_size, bias=False) elif self.attn_type == 'mlp': self.linear_query = nn.Linear(query_size, attn_size, bias=True) self.attn_w = nn.Linear(attn_size, 1, bias=False) elif self.attn_type == 'dot': assert self.query_size == self.attn_size def forward(self, input_0, input_1, input_2, input_3): arg1_1 = input_0 arg0_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
Fenkail/hgr_v2t
GlobalAttention
false
13690
[ "MIT" ]
190
d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
https://github.com/Fenkail/hgr_v2t/tree/d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
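A usage sketch for the dot-product branch of GlobalAttention above (class assumed in scope). Note that the bundled get_inputs() passes a 2-D memory_values tensor that only runs through broadcasting; this sketch uses the documented (batch, seq_len, attn_size) shape instead. With an all-ones mask, the masked-fill steps are no-ops and the scores form a proper distribution over the sequence.

import torch

attn = GlobalAttention(query_size=4, attn_size=4, attn_type='dot')
query = torch.rand(4, 4)               # (batch, query_size)
keys = torch.rand(4, 4, 4)             # (batch, seq_len, attn_size)
values = torch.rand(4, 4, 4)           # (batch, seq_len, attn_size)
masks = torch.ones(4, 4)               # every position is visible
score, context = attn(query, keys, values, masks)
assert score.shape == (4, 4) and context.shape == (4, 4)
assert torch.allclose(score.sum(dim=1), torch.ones(4))  # softmax rows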
Decoder
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.unsqueeze(-1).unsqueeze(-1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = F.sigmoid(self.deconv4(x)) return reconstruction def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) 
assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(131072, 25)](primals_4, buf0, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch .float32) triton_poi_fused_2[grid(2048, 36)](primals_8, buf2, 2048, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32 ) triton_poi_fused_3[grid(128, 36)](primals_10, buf3, 128, 36, XBLOCK =32, YBLOCK=32, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 1024 ), (1, 4), 0), out=buf4) del primals_1 buf5 = buf4 del buf4 buf14 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf5, primals_2, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups =1, bias=None) assert_size_stride(buf6, (4, 128, 5, 5), (3200, 1, 640, 128)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(12800)](buf7, primals_5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = extern_kernels.convolution(buf7, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 13, 13), (10816, 1, 832, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(43264)](buf9, primals_7, 43264, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 30, 30), (28800, 1, 960, 32)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_7[grid(115200)](buf11, primals_9, 115200, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 64, 64), (16384, 1, 256, 4)) buf13 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_sigmoid_8[grid(16, 4096)](buf12, primals_11, buf13, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del buf12 del primals_11 return buf13, primals_3, buf0, buf1, buf2, buf3, reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf7, buf9, buf11, buf13, buf14 class DecoderNew(nn.Module): """ VAE decoder """ def __init__(self, 
img_channels, latent_size): super(DecoderNew, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.deconv1.weight primals_5 = self.deconv1.bias primals_6 = self.deconv2.weight primals_7 = self.deconv2.bias primals_8 = self.deconv3.weight primals_9 = self.deconv3.bias primals_10 = self.deconv4.weight primals_11 = self.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
FabianSchuetze/world-models
Decoder
false
13691
[ "MIT" ]
440
d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
https://github.com/FabianSchuetze/world-models/tree/d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
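A shape walk-through for the VAE decoder above — a sketch assuming the class is in scope. Each ConvTranspose2d with stride 2 and no padding maps size n to (n - 1) * 2 + k, so the 1x1 feature map grows 1 -> 5 -> 13 -> 30 -> 64 for kernel sizes 5, 5, 6, 6, and the final sigmoid bounds the reconstruction to [0, 1]. (Some PyTorch releases flag F.sigmoid as deprecated in favour of torch.sigmoid; both compute the same thing.)

import torch

dec = Decoder(img_channels=4, latent_size=4)
z = torch.rand(4, 4)                   # (batch, latent_size)
img = dec(z)
assert img.shape == (4, 4, 64, 64)     # 1 -> 5 -> 13 -> 30 -> 64
assert img.min() >= 0 and img.max() <= 1   # sigmoid output range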
IndepAnisotropicGaussianUVLoss
import math import torch import torch.utils.data import torch.nn.functional as F from torch import nn class IndepAnisotropicGaussianUVLoss(nn.Module): """ Loss for the case of independent residuals with anisotropic covariances: $Sigma_i = sigma_i^2 I + r_i r_i^T$ The loss (negative log likelihood) is then: $1/2 sum_{i=1}^n (log(2 pi) + log sigma_i^2 (sigma_i^2 + ||r_i||^2) + ||delta_i||^2 / sigma_i^2 - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$, where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates difference between estimated and ground truth UV values For details, see: N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 """ def __init__(self, sigma_lower_bound: 'float'): super(IndepAnisotropicGaussianUVLoss, self).__init__() self.sigma_lower_bound = sigma_lower_bound self.log2pi = math.log(2 * math.pi) def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u: 'torch.Tensor', kappa_u_est: 'torch.Tensor', kappa_v_est: 'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'): sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2 delta_u = u - target_u delta_v = v - target_v delta_sqnorm = delta_u ** 2 + delta_v ** 2 delta_u_r_u = delta_u * kappa_u_est delta_v_r_v = delta_v * kappa_v_est delta_r = delta_u_r_u + delta_v_r_v delta_r_sqnorm = delta_r ** 2 denom2 = sigma2 * (sigma2 + r_sqnorm2) loss = 0.5 * (self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2) return loss.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sigma_lower_bound': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp8 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_ptr2 + r0, None) tmp18 = tl.load(in_ptr3 + r0, None) tmp19 = tl.load(in_ptr4 + r0, None) tmp22 = tl.load(in_ptr5 + r0, None) tmp23 = tl.load(in_ptr6 + r0, None) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 4.0 tmp7 = tmp5 + tmp6 tmp9 = tmp8 * tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = tmp7 + tmp12 tmp14 = tmp7 * tmp13 tmp15 = tl_math.log(tmp14) tmp16 = 1.8378770664093453 tmp17 = tmp15 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp20 tmp24 = tmp22 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tmp21 + tmp25 tmp27 = tmp26 / tmp7 tmp28 = tmp17 + tmp27 tmp29 = tmp20 * tmp8 tmp30 = tmp24 * tmp10 tmp31 = tmp29 + tmp30 tmp32 = tmp31 * tmp31 tmp33 = tmp32 / tmp14 tmp34 = tmp28 - tmp33 tmp35 = 0.5 tmp36 = tmp34 * tmp35 tmp37 = tl.broadcast_to(tmp36, [RBLOCK]) tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0)) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1 , arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 del arg5_1 del arg6_1 return buf1, class IndepAnisotropicGaussianUVLossNew(nn.Module): """ Loss for the case of independent residuals with anisotropic covariances: $Sigma_i = sigma_i^2 I + r_i r_i^T$ The loss (negative log likelihood) is then: $1/2 sum_{i=1}^n (log(2 pi) + log sigma_i^2 (sigma_i^2 + ||r_i||^2) + ||delta_i||^2 / sigma_i^2 - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$, where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates difference between estimated and ground truth UV values For details, see: N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels", p. 918--926, in Proc. 
NIPS 2019 """ def __init__(self, sigma_lower_bound: 'float'): super(IndepAnisotropicGaussianUVLossNew, self).__init__() self.sigma_lower_bound = sigma_lower_bound self.log2pi = math.log(2 * math.pi) def forward(self, input_0, input_1, input_2, input_3, input_4, input_5, input_6): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 arg5_1 = input_5 arg6_1 = input_6 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1]) return output[0]
FluteXu/DW-Research
IndepAnisotropicGaussianUVLoss
false
13,692
[ "Apache-2.0" ]
780
6b559d2d1d440c07e5936a65cd74a3bc657962dc
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
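A minimal parity sketch for the row above, assuming a CUDA device and that both classes (IndepAnisotropicGaussianUVLoss and its compiled IndepAnisotropicGaussianUVLossNew) are in scope; the fused kernel hardcodes 256 elements per reduction, so the (4, 4, 4, 4) shapes from get_inputs() are required:

import torch

def check_indep_anisotropic_uv_loss():
    # Assumption: both classes from this row are defined locally; the dataset
    # rows themselves are not importable modules.
    torch.manual_seed(0)
    eager = IndepAnisotropicGaussianUVLoss(sigma_lower_bound=4).cuda()
    fused = IndepAnisotropicGaussianUVLossNew(sigma_lower_bound=4).cuda()
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(7)]
    # Both return a 0-dim tensor (the summed negative log-likelihood).
    ref = eager(*args)
    out = fused(*args)
    assert torch.allclose(ref, out, rtol=1e-4, atol=1e-4)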
Dueling_Critic
import torch import torch.nn.functional as F import torch.nn as nn class Dueling_Critic(nn.Module): def __init__(self, input_size, output_size, hidden_size): super().__init__() self.input_size = input_size self.output_size = output_size self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, 1) self.linear3 = nn.Linear(hidden_size, output_size) def forward(self, x): x1 = F.relu(self.linear1(x)) x2 = F.relu(self.linear1(x)) y1 = self.linear2(x1) y2 = self.linear3(x2) x3 = y1 + y2 - y2.mean(dim=1, keepdim=True) return x3 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x3 = xindex // 64 x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x5, xmask) tmp6 = tl.load(in_ptr2 + (x6 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr2 + (16 + x6 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (32 + x6 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr2 + (48 + x6 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp12 = tmp10 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = tmp5 - tmp14 tl.store(out_ptr0 + x5, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
triton_poi_fused_add_mean_sub_1[grid(256)](buf2, primals_5, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf3 del primals_5 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), primals_6, primals_4, buf5 class Dueling_CriticNew(nn.Module): def __init__(self, input_size, output_size, hidden_size): super().__init__() self.input_size = input_size self.output_size = output_size self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, 1) self.linear3 = nn.Linear(hidden_size, output_size) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
FlickerNiko/ai_lib
Dueling_Critic
false
13,693
[ "MIT" ]
99
7087d4569c9a827d35dd8735b55a080834d31a82
https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82
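A small sketch of the dueling head above (CPU is fine for the eager class; the class is assumed to be in scope). Two points are visible in the compiled call(): both streams pass through linear1 (x1 and x2 are identical), so the graph computes the ReLU once (buf1) and feeds it to both linear2 and linear3; and the mean runs over dim=1 of the 4-D input (the stride-16 loads in triton_poi_fused_add_mean_sub_1), not over the last dim:

import torch

net = Dueling_Critic(input_size=4, output_size=4, hidden_size=4)
x = torch.rand(4, 4, 4, 4)
# Dueling combination: V (4,4,4,1) + A (4,4,4,4) - A.mean(dim=1, keepdim=True)
q = net(x)
print(q.shape)  # torch.Size([4, 4, 4, 4]) via broadcasting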
BoundaryEntDiscriminator
import torch import torch.nn as nn class BoundaryEntDiscriminator(nn.Module): def __init__(self): super(BoundaryEntDiscriminator, self).__init__() filter_num_list = [64, 128, 256, 512, 1] self.conv1 = nn.Conv2d(3, filter_num_list[0], kernel_size=4, stride =2, padding=2, bias=False) self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1], kernel_size=4, stride=2, padding=2, bias=False) self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2], kernel_size=4, stride=2, padding=2, bias=False) self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3], kernel_size=4, stride=2, padding=2, bias=False) self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4], kernel_size=4, stride=2, padding=2, bias=False) self.leakyrelu = nn.LeakyReLU(negative_slope=0.2) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.zero_() def forward(self, x): x = self.leakyrelu(self.conv1(x)) x = self.leakyrelu(self.conv2(x)) x = self.leakyrelu(self.conv3(x)) x = self.leakyrelu(self.conv4(x)) x = self.conv5(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 278784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 147968 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, None) tl.store(out_ptr1 + x0, tmp5, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (64, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1)) buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool) buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2, 278784, XBLOCK=512, num_warps=8, num_stages=1) del buf0 buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1)) buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool) buf5 = empty_strided_cuda((4, 128, 
17, 17), (36992, 289, 17, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5, 147968, XBLOCK=512, num_warps=8, num_stages=1) del buf3 buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1)) buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool ) buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch. float32) triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944, XBLOCK=512, num_warps=8, num_stages=1) del buf6 buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1)) buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch .bool) buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch .float32) triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11, 51200, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1)) return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11) class BoundaryEntDiscriminatorNew(nn.Module): def __init__(self): super(BoundaryEntDiscriminatorNew, self).__init__() filter_num_list = [64, 128, 256, 512, 1] self.conv1 = nn.Conv2d(3, filter_num_list[0], kernel_size=4, stride =2, padding=2, bias=False) self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1], kernel_size=4, stride=2, padding=2, bias=False) self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2], kernel_size=4, stride=2, padding=2, bias=False) self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3], kernel_size=4, stride=2, padding=2, bias=False) self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4], kernel_size=4, stride=2, padding=2, bias=False) self.leakyrelu = nn.LeakyReLU(negative_slope=0.2) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.zero_() def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
EmmaW8/BEAL
BoundaryEntDiscriminator
false
13,694
[ "MIT" ]
95
945cad38a354605b8bca5bc01ae1b65848d605e1
https://github.com/EmmaW8/BEAL/tree/945cad38a354605b8bca5bc01ae1b65848d605e1
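The intermediate shapes asserted in call() above (33, 17, 9, 5, 3) all follow from the standard Conv2d size formula with kernel_size=4, stride=2, padding=2; a quick check:

def conv_out(n, k=4, s=2, p=2):
    # out = floor((in + 2*pad - kernel) / stride) + 1
    return (n + 2 * p - k) // s + 1

sizes = [64]
for _ in range(5):
    sizes.append(conv_out(sizes[-1]))
print(sizes)  # [64, 33, 17, 9, 5, 3] -- matches buf0, buf3, buf6, buf9, buf12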
CombineSlices
import torch from torch import nn import torch.utils.data import torch.utils.data.distributed import torch.optim import torch.fft class CombineSlices(nn.Module): def __init__(self, slice_dim=2): super().__init__() self.slice_dim = slice_dim def forward(self, x): return torch.index_select(x, dim=self.slice_dim, index=torch.tensor (0, device=x.device)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data import torch.utils.data.distributed import torch.optim import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_select_0[grid(64)](arg0_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) del arg0_1 return buf0, class CombineSlicesNew(nn.Module): def __init__(self, slice_dim=2): super().__init__() self.slice_dim = slice_dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Gaskell-1206/fastMRI
CombineSlices
false
13,695
[ "MIT" ]
815
1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
https://github.com/Gaskell-1206/fastMRI/tree/1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
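index_select keeps the selected dimension with size 1, which is why buf0 above is allocated as (4, 4, 1, 4); a minimal check against plain slicing (eager class assumed in scope, CPU is fine):

import torch

m = CombineSlices(slice_dim=2)
x = torch.rand(4, 4, 4, 4)
out = m(x)
assert out.shape == (4, 4, 1, 4)
assert torch.equal(out, x[:, :, 0:1, :])  # 0:1 slicing keeps the dim, like index_select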
AttnGCNLayer
import math import torch import torch.nn as nn import torch.utils.data class GCNLayer(nn.Module): def __init__(self, embed_size, dropout=0.0): super().__init__() self.embed_size = embed_size self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False ) self.layernorm = nn.LayerNorm(embed_size) self.dropout = nn.Dropout(dropout) def forward(self, node_fts, rel_edges): """Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds class AttnGCNLayer(GCNLayer): def __init__(self, embed_size, d_ff, dropout=0.0): super().__init__(embed_size, dropout=dropout) self.edge_attn_query = nn.Linear(embed_size, d_ff) self.edge_attn_key = nn.Linear(embed_size, d_ff) self.attn_denominator = math.sqrt(d_ff) def forward(self, node_fts, rel_edges): """ Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ attn_scores = torch.einsum('bod,bid->boi', self.edge_attn_query( node_fts), self.edge_attn_key(node_fts)) / self.attn_denominator attn_scores = attn_scores.masked_fill(rel_edges == 0, -1e+18) attn_scores = torch.softmax(attn_scores, dim=2) attn_scores = attn_scores.masked_fill(rel_edges == 0, 0) ctx_embeds = self.ctx_layer(torch.bmm(attn_scores, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_size': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = -9.999999843067494e+17 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = -9.999999843067494e+17 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp0, tmp11, tmp10) tl.store(in_out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(64)](primals_6, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 4, 1), (4, 
1, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(16)](buf3, buf2, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf2 del buf2 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf3, buf4, buf5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf7, primals_3, out=buf8) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9) buf10 = buf5 del buf5 buf11 = buf4 del buf4 triton_poi_fused_add_native_layer_norm_3[grid(16)](primals_3, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](primals_3, buf9, buf10, buf11, primals_8, primals_9, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf11 del primals_9 return buf12, primals_3, primals_8, buf3, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) class GCNLayer(nn.Module): def __init__(self, embed_size, dropout=0.0): super().__init__() self.embed_size = embed_size self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False ) self.layernorm = nn.LayerNorm(embed_size) self.dropout = nn.Dropout(dropout) def forward(self, node_fts, rel_edges): """Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds class AttnGCNLayerNew(GCNLayer): def __init__(self, embed_size, d_ff, dropout=0.0): super().__init__(embed_size, dropout=dropout) self.edge_attn_query = nn.Linear(embed_size, d_ff) self.edge_attn_key = nn.Linear(embed_size, d_ff) self.attn_denominator = math.sqrt(d_ff) def forward(self, input_0, input_1): primals_1 = self.ctx_layer.weight primals_2 = self.layernorm.weight primals_5 = self.layernorm.bias primals_4 = self.edge_attn_query.weight primals_8 = self.edge_attn_query.bias primals_7 = self.edge_attn_key.weight primals_9 = self.edge_attn_key.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Fenkail/hgr_v2t
AttnGCNLayer
false
13,696
[ "MIT" ]
190
d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
https://github.com/Fenkail/hgr_v2t/tree/d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
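The einsum 'bod,bid->boi' above is just a batched Q @ K^T; the fused kernels bake in 0.5 = 1/sqrt(d_ff) for d_ff=4 and use -9.999999843067494e+17, the float32 rounding of the eager -1e+18 mask value. An equivalent eager sketch of the edge-attention step:

import math
import torch

b, n, d_ff = 4, 4, 4
q = torch.rand(b, n, d_ff)
k = torch.rand(b, n, d_ff)
rel_edges = (torch.rand(b, n, n) > 0.5).float()
scores = torch.bmm(q, k.transpose(1, 2)) / math.sqrt(d_ff)  # == einsum('bod,bid->boi')
scores = scores.masked_fill(rel_edges == 0, -1e18)          # mask before the softmax
attn = torch.softmax(scores, dim=2).masked_fill(rel_edges == 0, 0)  # and zero after it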
Cartesian
import torch from torch import nn import torch.utils.data import torch.utils.data.distributed import torch.optim import torch.fft class Cartesian(nn.Module): def forward(self, x): r, phi = x[..., 0], x[..., 1] return torch.stack((r * torch.cos(phi), r * torch.sin(phi)), dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data import torch.utils.data.distributed import torch.optim import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tl_math.cos(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.load(in_ptr0 + (1 + 4 * x1), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tl_math.sin(tmp15) tmp17 = tmp14 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp10, tmp19) tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class CartesianNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Gaskell-1206/fastMRI
Cartesian
false
13,697
[ "MIT" ]
815
1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
https://github.com/Gaskell-1206/fastMRI/tree/1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
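Only channels 0 (r) and 1 (phi) of the last dimension are read (hence the loads at in_ptr0 + 4*x1 and in_ptr0 + (1 + 4*x1) in the kernel), and stacking shrinks the last dim from 4 to 2, matching buf0's (4, 4, 4, 2):

import torch

m = Cartesian()  # assumed in scope from the row above
x = torch.rand(4, 4, 4, 4)
out = m(x)       # shape (4, 4, 4, 2)
r, phi = x[..., 0], x[..., 1]
assert torch.allclose(out[..., 0], r * torch.cos(phi))
assert torch.allclose(out[..., 1], r * torch.sin(phi))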
LandmarkHead
import torch from itertools import product as product import torch.nn as nn class LandmarkHead(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(LandmarkHead, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size= (1, 1), stride=1, padding=0) def forward(self, x): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 10) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from itertools import product as product import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 30 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (30, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (30,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 30, 64, 64), (122880, 1, 1920, 30)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 30), (122880, 1920, 30, 1), 0) del buf1 buf3 = reinterpret_tensor(buf2, (4, 12288, 10), (122880, 10, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(491520)](buf3, primals_2, 491520, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 return buf3, primals_1, buf0 class LandmarkHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(LandmarkHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size= (1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Edward1900/Face-Detector-1MB-with-landmark
LandmarkHead
false
13,698
[ "MIT" ]
907
16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
https://github.com/Edward1900/Face-Detector-1MB-with-landmark/tree/16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
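With num_anchors=3 the 1x1 conv emits 30 channels; permuting to NHWC and viewing as (N, -1, 10) gives 64*64*3 = 12288 anchor rows, exactly the (4, 12288, 10) that reinterpret_tensor produces above:

import torch

head = LandmarkHead(inchannels=512, num_anchors=3)  # assumed in scope
x = torch.rand(4, 512, 64, 64)
out = head(x)
assert out.shape == (4, 12288, 10)  # 64 * 64 * 3 anchors, 10 landmark coords each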
TransformerNet
import torch class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class TransformerNet(torch.nn.Module): """ From https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/transformer_net.py """ def __init__(self): super(TransformerNet, self).__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) self.in3 = torch.nn.InstanceNorm2d(128, affine=True) self.res1 = ResidualBlock(128) self.res2 = ResidualBlock(128) self.res3 = ResidualBlock(128) self.res4 = ResidualBlock(128) self.res5 = ResidualBlock(128) self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) self.in4 = torch.nn.InstanceNorm2d(64, affine=True) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, X): y = self.relu(self.in1(self.conv1(X))) y = self.relu(self.in2(self.conv2(y))) y = self.relu(self.in3(self.conv3(y))) y = self.res1(y) y = self.res2(y) y = self.res3(y) y = self.res4(y) y = self.res5(y) y = self.relu(self.in4(self.deconv1(y))) y = self.relu(self.in5(self.deconv2(y))) y = self.deconv3(y) return y def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
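The UpsampleConvLayer docstring above motivates nearest-neighbour upsampling followed by a reflection-padded conv as an alternative to ConvTranspose2d (checkerboard artifacts, per the distill.pub reference). A shape-only sketch of the deconv1 stage, with the class assumed in scope (16 -> 32 spatial, 128 -> 64 channels, as in the compiled graph below):

import torch

up = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
x = torch.rand(4, 128, 16, 16)
# Upsample to 32x32, reflection-pad to 34x34, 3x3 conv back down to 32x32.
print(up(x).shape)  # torch.Size([4, 64, 32, 32])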
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 62208 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 32 tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. 
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6_tmp[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 32, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 557568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x2 = xindex // 4356 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 64, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 128 tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0 % 128, None, eviction_policy='evict_last') tmp2 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None) tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = tl.broadcast_to(tmp5, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.full([1], 256, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp4 - tmp12 tmp24 = tmp23 * tmp22 tmp25 = tmp24 * tmp0 tmp26 = tmp25 + tmp1 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tl.store(out_ptr0 + x0, tmp0, None) tl.store(out_ptr1 + x0, tmp1, None) tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp22, None) tl.store(out_ptr3 + (r3 + 256 * x0), tmp28, None) tl.store(out_ptr2 + x0, tmp12, None) @triton.jit def triton_poi_fused_reflection_pad2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_9(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, 
[RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 128, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 128 tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp27 = tl.load(in_out_ptr1 + (r3 + 256 * x0), None) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = tl.broadcast_to(tmp4, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.full([1], 256, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp11 tmp18 = 256.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tmp23 * tmp0 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tl.store(out_ptr0 + x0, tmp0, None) tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp3, None) tl.store(in_out_ptr1 + (r3 + 256 * x0), tmp28, None) tl.store(out_ptr3 + x0, tmp22, None) tl.store(out_ptr1 + x0, tmp11, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None) tl.store(out_ptr2 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp15, None) @triton.jit def triton_poi_fused_arange_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_reflection_pad2d_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 34 % 34 x0 = xindex % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = 256.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp11 * tmp17 tmp20 = tmp18 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr6 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp24 = tmp22 + tmp23 tl.store(out_ptr0 + x7, tmp24, None) @triton.jit def triton_poi_fused_arange_17(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_relu_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 66 % 66 x0 = xindex % 66 x2 = xindex // 4356 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2), xmask, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tl.store(out_ptr0 + x5, tmp19, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, 
primals_59, primals_60, primals_61, primals_62, primals_63 ) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64,), (1,)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128,), (1,)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128,), (1,)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128,), (1,)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (128,), (1,)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128,), (1,)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (128,), (1,)) assert_size_stride(primals_40, (128,), (1,)) assert_size_stride(primals_41, (128,), (1,)) assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_43, (128,), (1,)) assert_size_stride(primals_44, (128,), (1,)) assert_size_stride(primals_45, (128,), (1,)) assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_47, (128,), (1,)) assert_size_stride(primals_48, (128,), (1,)) assert_size_stride(primals_49, (128,), (1,)) assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_51, (128,), (1,)) assert_size_stride(primals_52, (128,), (1,)) assert_size_stride(primals_53, (128,), (1,)) assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_55, (64,), (1,)) assert_size_stride(primals_56, (64,), (1,)) assert_size_stride(primals_57, (64,), (1,)) assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_59, (32,), (1,)) assert_size_stride(primals_60, (32,), (1,)) assert_size_stride(primals_61, (32,), (1,)) assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1)) assert_size_stride(primals_63, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(62208)](primals_1, buf0, 62208, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = 
extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32 ) buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch .float32) buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf6 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](buf2 , buf8, primals_3, buf5, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_3 buf3 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_4, buf3, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_4 buf4 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_5, buf4, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_3[grid(557568)](buf2, buf5, buf8, buf3, buf4, buf9, 557568, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf11 = buf10 del buf10 buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. float32) buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf15 triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)]( buf11, buf17, primals_7, buf14, 256, 1024, num_warps=8, num_stages=1) del primals_7 buf12 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_8, buf12, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_8 buf13 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_9, buf13, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_6[grid(295936)](buf11, buf14, buf17, buf12, buf13, buf18, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf21 = empty_strided_cuda((512,), (1,), torch.float32) buf22 = empty_strided_cuda((512,), (1,), torch.float32) buf20 = buf19 del buf19 buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf24 buf27 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7[ grid(512)](buf20, buf26, primals_12, primals_13, primals_11, buf21, buf22, buf23, buf27, 512, 256, num_warps=2, num_stages=1) del primals_11 del primals_12 del primals_13 buf28 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf27, buf28, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 16, 16), (32768, 256, 16, 1)) buf30 = buf29 del buf29 buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. float32) buf34 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf36 = reinterpret_tensor(buf34, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf34 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf30, buf36, primals_15, buf33, 512, 256, num_warps=2, num_stages=1) del primals_15 buf31 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_16, buf31, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_16 buf32 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_17, buf32, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf30, buf33, buf36, buf31, buf32, buf37, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1)) buf40 = empty_strided_cuda((512,), (1,), torch.float32) buf39 = buf38 del buf38 buf41 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf45 = buf27 del buf27 buf44 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf39, buf45, primals_20, primals_19, primals_21, buf40, buf41, buf44, 512, 256, num_warps=2, num_stages=1) del primals_19 del primals_20 del primals_21 buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf45, buf46, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1)) buf48 = buf47 del buf47 buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf52 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf48, buf54, primals_23, buf51, 512, 256, num_warps=2, num_stages=1) del primals_23 buf49 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_24, buf49, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_24 buf50 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_25, buf50, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf55 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf48, buf51, buf54, buf49, buf50, buf55, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf55, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 128, 16, 16), (32768, 256, 16, 1)) buf58 = empty_strided_cuda((512,), (1,), torch.float32) buf57 = buf56 del buf56 buf59 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf63 = buf45 del buf45 buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf57, buf63, primals_28, primals_27, primals_29, buf58, buf59, buf62, 512, 256, num_warps=2, num_stages=1) del primals_27 del primals_28 del primals_29 buf64 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf63, buf64, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf65 = extern_kernels.convolution(buf64, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 128, 16, 16), (32768, 256, 16, 1)) buf66 = buf65 del buf65 buf69 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf72 = reinterpret_tensor(buf70, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf70 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf66, buf72, primals_31, buf69, 512, 256, num_warps=2, num_stages=1) del primals_31 buf67 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_32, buf67, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_32 buf68 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_33, buf68, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_33 buf73 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf66, buf69, buf72, buf67, buf68, buf73, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf73, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf74, (4, 128, 16, 16), (32768, 256, 16, 1)) buf76 = empty_strided_cuda((512,), (1,), torch.float32) buf75 = buf74 del buf74 buf77 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf81 = buf63 del buf63 buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf75, buf81, primals_36, primals_35, primals_37, buf76, buf77, buf80, 512, 256, num_warps=2, num_stages=1) del primals_35 del primals_36 del primals_37 buf82 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf81, buf82, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf83 = extern_kernels.convolution(buf82, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = buf83 del buf83 buf87 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf88 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf90 = reinterpret_tensor(buf88, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf88 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf84, buf90, primals_39, buf87, 512, 256, num_warps=2, num_stages=1) del primals_39 buf85 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_40, buf85, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_40 buf86 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_41, buf86, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_41 buf91 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf84, buf87, buf90, buf85, buf86, buf91, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf92 = extern_kernels.convolution(buf91, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf92, (4, 128, 16, 16), (32768, 256, 16, 1)) buf94 = empty_strided_cuda((512,), (1,), torch.float32) buf93 = buf92 del buf92 buf95 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf99 = buf81 del buf81 buf98 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf93, buf99, primals_44, primals_43, primals_45, buf94, buf95, buf98, 512, 256, num_warps=2, num_stages=1) del primals_43 del primals_44 del primals_45 buf100 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf99, buf100, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf101 = extern_kernels.convolution(buf100, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf101, (4, 128, 16, 16), (32768, 256, 16, 1)) buf102 = buf101 del buf101 buf105 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf106 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf108 = reinterpret_tensor(buf106, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf106 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf102, buf108, primals_47, buf105, 512, 256, num_warps=2, num_stages=1) del primals_47 buf103 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_48, buf103, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_48 buf104 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_49, buf104, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_49 buf109 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf102, buf105, buf108, buf103, buf104, buf109, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf110 = extern_kernels.convolution(buf109, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf110, (4, 128, 16, 16), (32768, 256, 16, 1)) buf111 = buf110 del buf110 buf113 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf114 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf116 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_13[grid(512)]( buf111, primals_51, buf113, buf114, buf116, 512, 256, num_warps =2, num_stages=1) del primals_51 buf112 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_52, buf112, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_52 buf117 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_arange_14[grid(32)](buf117, 32, XBLOCK=32, num_warps=1, num_stages=1) buf118 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_15[grid(32)](buf118, 32, XBLOCK=32, num_warps=1, num_stages=1) buf119 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused__unsafe_index_add_reflection_pad2d_16[grid(591872)]( buf118, buf111, buf113, buf114, buf112, primals_53, buf99, buf119, 591872, XBLOCK=512, num_warps=8, num_stages=1) del buf114 del buf99 del primals_53 buf120 = extern_kernels.convolution(buf119, primals_54, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf121 = buf120 del buf120 buf124 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. 
float32) buf125 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf127 = reinterpret_tensor(buf125, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf125 triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)]( buf121, buf127, primals_55, buf124, 256, 1024, num_warps=8, num_stages=1) del primals_55 buf122 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_56, buf122, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_56 buf123 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_57, buf123, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_57 buf128 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_17[grid(64)](buf128, 64, XBLOCK=64, num_warps=1, num_stages=1) buf129 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_18[grid(64)](buf129, 64, XBLOCK=64, num_warps=1, num_stages=1) buf130 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_reflection_pad2d_relu_19[grid(1115136)]( buf129, buf121, buf124, buf127, buf122, buf123, buf130, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) buf131 = extern_kernels.convolution(buf130, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf132 = buf131 del buf131 buf135 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch. float32) buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32) buf138 = reinterpret_tensor(buf136, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf136 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)]( buf132, buf138, primals_59, buf135, 128, 4096, XBLOCK=1, RBLOCK =2048, num_warps=16, num_stages=1) del primals_59 buf133 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_60, buf133, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_60 buf134 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_61, buf134, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_61 buf139 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_20[grid(663552)](buf132, buf135, buf138, buf133, buf134, buf139, 663552, XBLOCK=1024, num_warps=4, num_stages=1) buf140 = extern_kernels.convolution(buf139, primals_62, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf140, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf141 = buf140 del buf140 triton_poi_fused_convolution_21[grid(49152)](buf141, primals_63, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_63 return (buf141, primals_2, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, primals_34, primals_38, primals_42, primals_46, primals_50, primals_54, primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22, buf23, buf26, buf28, buf30, buf31, buf32, buf33, buf36, buf37, buf39, buf40, reinterpret_tensor(buf44, (512,), (1,), 0), buf46, buf48, buf49, buf50, buf51, buf54, buf55, buf57, buf58, reinterpret_tensor(buf62, (512,), (1,), 0), buf64, buf66, buf67, buf68, buf69, buf72, buf73, buf75, buf76, reinterpret_tensor(buf80, (512,), (1,), 0), buf82, buf84, buf85, buf86, 
buf87, buf90, buf91, buf93, buf94, reinterpret_tensor(buf98, (512,), (1,), 0), buf100, buf102, buf103, buf104, buf105, buf108, buf109, buf111, buf112, reinterpret_tensor(buf116, (512,), (1,), 0), buf117, buf118, buf119, buf121, buf122, buf123, buf124, buf127, buf128, buf129, buf130, buf132, buf133, buf134, buf135, buf138, buf139, reinterpret_tensor( buf113, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor( buf95, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf77, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf59, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf41, (1, 512, 1, 1), (512, 1, 1, 1), 0)) class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. 
ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class TransformerNetNew(torch.nn.Module): """ From https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/transformer_net.py """ def __init__(self): super(TransformerNetNew, self).__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) self.in3 = torch.nn.InstanceNorm2d(128, affine=True) self.res1 = ResidualBlock(128) self.res2 = ResidualBlock(128) self.res3 = ResidualBlock(128) self.res4 = ResidualBlock(128) self.res5 = ResidualBlock(128) self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) self.in4 = torch.nn.InstanceNorm2d(64, affine=True) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.in1.weight primals_5 = self.in1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.in2.weight primals_9 = self.in2.bias primals_10 = self.conv3.conv2d.weight primals_11 = self.conv3.conv2d.bias primals_12 = self.in3.weight primals_13 = self.in3.bias primals_14 = self.res1.conv1.conv2d.weight primals_15 = self.res1.conv1.conv2d.bias primals_16 = self.res1.in1.weight primals_17 = self.res1.in1.bias primals_18 = self.res1.conv2.conv2d.weight primals_19 = self.res1.conv2.conv2d.bias primals_20 = self.res1.in2.weight primals_21 = self.res1.in2.bias primals_22 = self.res2.conv1.conv2d.weight primals_23 = self.res2.conv1.conv2d.bias primals_24 = self.res2.in1.weight primals_25 = self.res2.in1.bias primals_26 = self.res2.conv2.conv2d.weight primals_27 = self.res2.conv2.conv2d.bias primals_28 = self.res2.in2.weight primals_29 = self.res2.in2.bias primals_30 = self.res3.conv1.conv2d.weight primals_31 = self.res3.conv1.conv2d.bias primals_32 = self.res3.in1.weight primals_33 = self.res3.in1.bias primals_34 = self.res3.conv2.conv2d.weight primals_35 = self.res3.conv2.conv2d.bias primals_36 = self.res3.in2.weight primals_37 = self.res3.in2.bias primals_38 = self.res4.conv1.conv2d.weight primals_39 = self.res4.conv1.conv2d.bias primals_40 = self.res4.in1.weight primals_41 = self.res4.in1.bias primals_42 = self.res4.conv2.conv2d.weight primals_43 = self.res4.conv2.conv2d.bias primals_44 = self.res4.in2.weight primals_45 = self.res4.in2.bias primals_46 = self.res5.conv1.conv2d.weight primals_47 = self.res5.conv1.conv2d.bias primals_48 = self.res5.in1.weight primals_49 = self.res5.in1.bias primals_50 = self.res5.conv2.conv2d.weight primals_51 = self.res5.conv2.conv2d.bias primals_52 = self.res5.in2.weight primals_53 = self.res5.in2.bias primals_54 = 
self.deconv1.conv2d.weight primals_55 = self.deconv1.conv2d.bias primals_56 = self.in4.weight primals_57 = self.in4.bias primals_58 = self.deconv2.conv2d.weight primals_59 = self.deconv2.conv2d.bias primals_60 = self.in5.weight primals_61 = self.in5.bias primals_62 = self.deconv3.conv2d.weight primals_63 = self.deconv3.conv2d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63]) return output[0]
EdenBD/MultiModalStory-demo
TransformerNet
false
13,699
[ "Apache-2.0" ]
154
5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
https://github.com/EdenBD/MultiModalStory-demo/tree/5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
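A hedged validation sketch for a pair like this one (not part of the dataset row): both variants define the same parameter tree, so one can copy weights from the eager TransformerNet into the generated TransformerNetNew and compare outputs on the get_inputs() shape. The tolerances are a guess; the fused kernels reorder floating-point math.

import torch

# Hypothetical parity harness; assumes CUDA and that both classes from this
# row are in scope.
eager = TransformerNet().cuda().eval()
fused = TransformerNetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical module/parameter names
x = torch.rand(4, 3, 64, 64, device='cuda')  # shape used by get_inputs()
with torch.no_grad():
    assert torch.allclose(fused(x), eager(x), rtol=1e-4, atol=1e-4)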
CategoricalActor
import torch
from torch.distributions import Categorical
import torch.nn.functional as F
import torch.nn as nn


def weights_init_(m):
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        torch.nn.init.constant_(m.bias, 0)


class CategoricalActor(nn.Module):

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(CategoricalActor, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, action_dim)
        self.apply(weights_init_)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        prob = F.softmax(x, -1)
        return prob

    def sample(self, state):
        prob = self.forward(state)
        distribution = Categorical(probs=prob)
        sample_action = distribution.sample().unsqueeze(-1)
        z = (prob == 0.0).float() * 1e-08
        logprob = torch.log(prob + z)
        greedy = torch.argmax(prob, dim=-1).unsqueeze(-1)
        return sample_action, prob, logprob, greedy


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'hidden_dim': 4, 'action_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.distributions import Categorical
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
            primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf5
    return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
        buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8


def weights_init_(m):
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        torch.nn.init.constant_(m.bias, 0)


class CategoricalActorNew(nn.Module):

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(CategoricalActorNew, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, action_dim)
        self.apply(weights_init_)

    def sample(self, state):
        prob = self.forward(state)
        distribution = Categorical(probs=prob)
        sample_action = distribution.sample().unsqueeze(-1)
        z = (prob == 0.0).float() * 1e-08
        logprob = torch.log(prob + z)
        greedy = torch.argmax(prob, dim=-1).unsqueeze(-1)
        return sample_action, prob, logprob, greedy

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear3.weight
        primals_7 = self.linear3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
FlickerNiko/ai_lib
CategoricalActor
false
13,700
[ "MIT" ]
99
7087d4569c9a827d35dd8735b55a080834d31a82
https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82
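A brief usage sketch (illustrative only, assuming a CUDA device): the generated class keeps the eager sample() method, which routes through the fused forward, so the sampling API is unchanged.

import torch

# Hypothetical usage; sizes mirror get_inputs()/get_init_inputs() above.
actor = CategoricalActorNew(state_dim=4, hidden_dim=4, action_dim=4).cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')
action, prob, logprob, greedy = actor.sample(state)
# forward ends in a softmax, so probabilities sum to 1 along the last dim
assert torch.allclose(prob.sum(-1), torch.ones_like(prob.sum(-1)), atol=1e-5)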
ClassHead
import torch
from itertools import product as product
import torch.nn as nn


class ClassHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
            kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()
        return out.view(out.shape[0], -1, 2)


def get_inputs():
    return [torch.rand([4, 512, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)


@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x4 = xindex
    x0 = xindex % 6
    tmp0 = tl.load(in_out_ptr0 + x4, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x4, tmp2, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768,
            512), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_3
        buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6))
        buf2 = reinterpret_tensor(buf1, (4, 64, 64, 6), (24576, 384, 6, 1), 0)
        del buf1
        buf3 = reinterpret_tensor(buf2, (4, 12288, 2), (24576, 2, 1), 0)
        del buf2
        triton_poi_fused_clone_view_1[grid(98304)](buf3, primals_2, 98304,
            XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
    return buf3, primals_1, buf0


class ClassHeadNew(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHeadNew, self).__init__()
        self.num_anchors = num_anchors
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
            kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, input_0):
        primals_1 = self.conv1x1.weight
        primals_2 = self.conv1x1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Edward1900/Face-Detector-1MB-with-landmark
ClassHead
false
13,701
[ "MIT" ]
907
16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
https://github.com/Edward1900/Face-Detector-1MB-with-landmark/tree/16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
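The head maps 512 input channels to num_anchors * 2 class scores per spatial location, then permutes and flattens to (N, H*W*num_anchors, 2). A small shape check, assuming CUDA (illustrative only, not part of the row):

import torch

# Hypothetical check: 64 * 64 positions * 3 anchors = 12288 rows of 2 scores.
head = ClassHeadNew(inchannels=512, num_anchors=3).cuda()
out = head(torch.rand(4, 512, 64, 64, device='cuda'))
assert out.shape == (4, 12288, 2)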
BboxHead
import torch
from itertools import product as product
import torch.nn as nn


class BboxHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4,
            kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()
        return out.view(out.shape[0], -1, 4)


def get_inputs():
    return [torch.rand([4, 512, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)


@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x4 = xindex
    x0 = xindex % 12
    tmp0 = tl.load(in_out_ptr0 + x4, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x4, tmp2, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768,
            512), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_3
        buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12))
        buf2 = reinterpret_tensor(buf1, (4, 64, 64, 12), (49152, 768, 12,
            1), 0)
        del buf1
        buf3 = reinterpret_tensor(buf2, (4, 12288, 4), (49152, 4, 1), 0)
        del buf2
        triton_poi_fused_clone_view_1[grid(196608)](buf3, primals_2,
            196608, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
    return buf3, primals_1, buf0


class BboxHeadNew(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHeadNew, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4,
            kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, input_0):
        primals_1 = self.conv1x1.weight
        primals_2 = self.conv1x1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Edward1900/Face-Detector-1MB-with-landmark
BboxHead
false
13,702
[ "MIT" ]
907
16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
https://github.com/Edward1900/Face-Detector-1MB-with-landmark/tree/16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
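Same layout as ClassHead above, but with 4 box-regression values per anchor. A matching shape check, again assuming CUDA (illustrative only):

import torch

# Hypothetical check: 64 * 64 positions * 3 anchors = 12288 rows of 4 offsets.
head = BboxHeadNew(inchannels=512, num_anchors=3).cuda()
out = head(torch.rand(4, 512, 64, 64, device='cuda'))
assert out.shape == (4, 12288, 4)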
openai_critic
import torch
import torch.nn as nn


class openai_critic(nn.Module):

    def __init__(self, obs_shape_n, action_shape_n):
        super(openai_critic, self).__init__()
        self.LReLU = nn.LeakyReLU(0.01)
        self.linear_c1 = nn.Linear(action_shape_n + obs_shape_n, 128)
        self.linear_c2 = nn.Linear(128, 64)
        self.linear_c = nn.Linear(64, 1)
        self.reset_parameters()
        self.train()

    def reset_parameters(self):
        nn.init.calculate_gain('leaky_relu')
        nn.init.xavier_uniform_(self.linear_c1.weight, gain=nn.init.
            calculate_gain('leaky_relu'))
        nn.init.xavier_uniform_(self.linear_c2.weight, gain=nn.init.
            calculate_gain('leaky_relu'))
        nn.init.xavier_uniform_(self.linear_c.weight, gain=nn.init.
            calculate_gain('leaky_relu'))

    def forward(self, obs_input, action_input):
        x_cat = self.LReLU(self.linear_c1(torch.cat([obs_input,
            action_input], dim=1)))
        x = self.LReLU(self.linear_c2(x_cat))
        value = self.linear_c(x)
        return value


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'obs_shape_n': 4, 'action_shape_n': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (128, 8), (8, 1))
    assert_size_stride(primals_4, (128,), (1,))
    assert_size_stride(primals_5, (64, 128), (128, 1))
    assert_size_stride(primals_6, (64,), (1,))
    assert_size_stride(primals_7, (1, 64), (64, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 128), (1,
            8), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
        buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
        triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_4, buf2,
            buf3, 512, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_4
        buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (128, 64),
            (1, 128), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
        buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        triton_poi_fused_leaky_relu_2[grid(256)](buf4, primals_6, buf5,
            buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf4
        del primals_6
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7,
            (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf8)
        del primals_8
    return buf8, buf0, buf2, buf3, buf5, buf6, primals_7, primals_5


class openai_criticNew(nn.Module):

    def __init__(self, obs_shape_n, action_shape_n):
        super(openai_criticNew, self).__init__()
        self.LReLU = nn.LeakyReLU(0.01)
        self.linear_c1 = nn.Linear(action_shape_n + obs_shape_n, 128)
        self.linear_c2 = nn.Linear(128, 64)
        self.linear_c = nn.Linear(64, 1)
        self.reset_parameters()
        self.train()

    def reset_parameters(self):
        nn.init.calculate_gain('leaky_relu')
        nn.init.xavier_uniform_(self.linear_c1.weight, gain=nn.init.
            calculate_gain('leaky_relu'))
        nn.init.xavier_uniform_(self.linear_c2.weight, gain=nn.init.
            calculate_gain('leaky_relu'))
        nn.init.xavier_uniform_(self.linear_c.weight, gain=nn.init.
            calculate_gain('leaky_relu'))

    def forward(self, input_0, input_1):
        primals_3 = self.linear_c1.weight
        primals_4 = self.linear_c1.bias
        primals_5 = self.linear_c2.weight
        primals_6 = self.linear_c2.bias
        primals_7 = self.linear_c.weight
        primals_8 = self.linear_c.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
FlickerNiko/ai_lib
openai_critic
false
13,703
[ "MIT" ]
99
7087d4569c9a827d35dd8735b55a080834d31a82
https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82
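The critic concatenates observation and action along dim=1 before the first linear layer, so the two inputs must share a batch dimension. A hedged usage sketch (CUDA assumed, illustrative only):

import torch

# Hypothetical usage; shapes follow get_inputs()/get_init_inputs() above.
critic = openai_criticNew(obs_shape_n=4, action_shape_n=4).cuda()
obs = torch.rand(4, 4, device='cuda')
act = torch.rand(4, 4, device='cuda')
value = critic(obs, act)
assert value.shape == (4, 1)  # one scalar value per batch element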
eSEModule
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn


class Hsigmoid(nn.Module):

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return F.relu6(x + 3.0, inplace=self.inplace) / 6.0


class eSEModule(nn.Module):

    def __init__(self, channel, reduction=4):
        super(eSEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
        self.hsigmoid = Hsigmoid()

    def forward(self, x):
        input = x
        x = self.avg_pool(x)
        x = self.fc(x)
        x = self.hsigmoid(x)
        return input * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channel': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn.functional as F
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1,
        in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex // 16
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = 3.0
    tmp5 = tmp3 + tmp4
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = 6.0
    tmp9 = triton_helpers.minimum(tmp7, tmp8)
    tmp10 = 0.16666666666666666
    tmp11 = tmp9 * tmp10
    tmp12 = tmp0 * tmp11
    tl.store(out_ptr0 + x3, tmp12, xmask)


@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 3.0
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tmp7 = 6.0
    tmp8 = tmp4 >= tmp7
    tmp9 = tmp6 | tmp8
    tl.store(out_ptr0 + x2, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_convolution_div_hardtanh_mul_1[grid(256)](
            primals_1, buf2, primals_3, buf3, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
        triton_poi_fused_add_convolution_hardtanh_backward_2[grid(16)](buf2,
            primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf2
        del primals_3
    return buf3, primals_1, primals_2, buf1, buf4


class Hsigmoid(nn.Module):

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return F.relu6(x + 3.0, inplace=self.inplace) / 6.0


class eSEModuleNew(nn.Module):

    def __init__(self, channel, reduction=4):
        super(eSEModuleNew, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
        self.hsigmoid = Hsigmoid()

    def forward(self, input_0):
        primals_2 = self.fc.weight
        primals_3 = self.fc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
FluteXu/DW-Research
eSEModule
false
13,704
[ "Apache-2.0" ]
780
6b559d2d1d440c07e5936a65cd74a3bc657962dc
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
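The module computes a per-channel gate in [0, 1] (global average pool, 1x1 conv, hard sigmoid) and multiplies it back onto the input, so the output shape matches the input. A hedged sketch (CUDA assumed, illustrative only):

import torch

# Hypothetical usage; with non-negative inputs the gated output can never
# exceed the input, since the gate lies in [0, 1].
se = eSEModuleNew(channel=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = se(x)
assert out.shape == x.shape
assert bool((out <= x + 1e-6).all())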
Encoder
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):
    """ VAE encoder """

    def __init__(self, img_channels, latent_size):
        super(Encoder, self).__init__()
        self.latent_size = latent_size
        self.img_channels = img_channels
        self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
        self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
        self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
        self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view(x.size(0), -1)
        mu = self.fc_mu(x)
        logsigma = self.fc_logsigma(x)
        return mu, logsigma


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch .float32) triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_5[grid(123008)](buf6, primals_2, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(50176)](buf8, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(18432)](buf10, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256)) buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. 
float32) buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(1024, 4)]( buf11, primals_9, buf12, buf15, 1024, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del buf11 del primals_9 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf13) del primals_11 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf14) del primals_13 return (buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), primals_12, primals_10, buf15) class EncoderNew(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(EncoderNew, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc_mu.weight primals_11 = self.fc_mu.bias primals_12 = self.fc_logsigma.weight primals_13 = self.fc_logsigma.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
FabianSchuetze/world-models
Encoder
false
13705
[ "MIT" ]
440
d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
https://github.com/FabianSchuetze/world-models/tree/d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
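A minimal usage sketch for the Encoder record above — not part of the original record — assuming a CUDA device (the generated kernels are CUDA-only) and reusing the shapes from the record's get_inputs()/get_init_inputs():

# Hypothetical smoke test for EncoderNew; shapes taken from get_inputs()/get_init_inputs() above.
import torch
model = EncoderNew(img_channels=4, latent_size=4).cuda()
x = torch.rand(4, 4, 64, 64, device='cuda')
mu, logsigma = model(x)  # two (4, 4) tensors, matching buf13/buf14 returned by call()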
ToRGB
from torch.autograd import Function import torch from torch import nn from torch.nn import functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, upsample=True, resolution=None, blur_kernel=[1, 3, 3, 1]): super().__init__() self.is_upsample = upsample self.resolution = resolution if upsample: self.upsample = Upsample(blur_kernel) self.conv = nn.Conv2d(in_channel, 3, kernel_size=1) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, skip=None): out = self.conv(input) out = out + self.bias if skip is not None: if self.is_upsample: skip = self.upsample(skip) out = out + skip return out def flops(self): m = self.conv kernel_ops = torch.zeros(m.weight.size()[2:]).numel() bias_ops = 1 flops = 1 * self.resolution * self.resolution * 3 * (m.in_channels // m.groups * kernel_ops + bias_ops) if self.is_upsample: w_shape = 1, 1, 4, 4 kernel_ops = torch.zeros(w_shape[2:]).numel() flops = 1 * 3 * self.resolution * self.resolution * (3 * kernel_ops ) return flops def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 4, 4), (48, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(192)](buf1, primals_2, primals_4, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 return buf1, primals_1, primals_3 def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, 
out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, upsample=True, resolution=None, blur_kernel=[1, 3, 3, 1]): super().__init__() self.is_upsample = upsample self.resolution = resolution if upsample: self.upsample = Upsample(blur_kernel) self.conv = nn.Conv2d(in_channel, 3, kernel_size=1) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def flops(self): m = self.conv kernel_ops = torch.zeros(m.weight.size()[2:]).numel() bias_ops = 1 flops = 1 * self.resolution * self.resolution * 3 * (m.in_channels // m.groups * kernel_ops + bias_ops) if self.is_upsample: w_shape = 1, 1, 4, 4 kernel_ops = torch.zeros(w_shape[2:]).numel() flops = 1 * 3 * self.resolution * self.resolution * (3 * kernel_ops ) return flops def forward(self, input_0): primals_4 = self.bias primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4]) return output[0]
G-arj/StyleSwin
ToRGB
false
13706
[ "MIT" ]
398
0c592b3334159613ebe4a33bd6c4ea042dac42d4
https://github.com/G-arj/StyleSwin/tree/0c592b3334159613ebe4a33bd6c4ea042dac42d4
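A usage sketch for the ToRGB record above, assuming CUDA; note that the compiled ToRGBNew.forward only covers the skip=None path of the eager module (its signature takes a single input):

# Hypothetical smoke test for ToRGBNew; shapes from get_inputs()/get_init_inputs() above.
import torch
to_rgb = ToRGBNew(in_channel=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = to_rgb(x)  # (4, 3, 4, 4): 1x1 conv to 3 channels, with both biases added by the fused Triton kernel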
AdaptiveInstanceNorm
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F from torch.cuda.amp import custom_fwd from torch.cuda.amp import custom_bwd def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): if input.device.type == 'cpu': rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2) * scale else: return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod @custom_fwd(cast_inputs=torch.float32) def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod @custom_bwd def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class AdaptiveInstanceNorm(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm1d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) def forward(self, input, style): style = self.style(style).unsqueeze(-1) gamma, beta = style.chunk(2, 1) out = self.norm(input) out = gamma * out + beta return out def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch import nn from torch.nn import functional as F from torch.cuda.amp import custom_fwd from torch.cuda.amp import custom_bwd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 + x1 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 - tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp4 * tmp9 tmp13 = tmp12 * tmp2 tmp14 = tmp11 + tmp13 tmp15 = tmp10 + tmp14 tl.store(out_ptr0 + x4, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = 
empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4 ), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf3 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) triton_poi_fused__native_batch_norm_legit_1[grid(4)](primals_4, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_2[grid(64)](buf1, primals_2, primals_4, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del buf2 del buf3 del primals_2 return buf4, primals_3, primals_4 def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): if input.device.type == 'cpu': rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2) * scale else: return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod @custom_fwd(cast_inputs=torch.float32) def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod @custom_bwd def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class AdaptiveInstanceNormNew(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm1d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) def forward(self, input_0, input_1): primals_1 = self.style.weight primals_2 = self.style.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
G-arj/StyleSwin
AdaptiveInstanceNorm
false
13707
[ "MIT" ]
398
0c592b3334159613ebe4a33bd6c4ea042dac42d4
https://github.com/G-arj/StyleSwin/tree/0c592b3334159613ebe4a33bd6c4ea042dac42d4
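A usage sketch for the AdaptiveInstanceNorm record above, assuming CUDA. Both arguments share the (4, 4) shape from get_inputs(), and the fused kernel materializes a (4, 4, 4) output (buf4 in call()), mirroring the gamma/beta broadcast in the eager module:

# Hypothetical smoke test for AdaptiveInstanceNormNew; shapes from get_inputs()/get_init_inputs() above.
import torch
adain = AdaptiveInstanceNormNew(in_channel=4, style_dim=4).cuda()
a = torch.rand(4, 4, device='cuda')
b = torch.rand(4, 4, device='cuda')
out = adain(a, b)  # (4, 4, 4)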
VAE
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.unsqueeze(-1).unsqueeze(-1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = F.sigmoid(self.deconv4(x)) return reconstruction class Encoder(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(Encoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(x.size(0), -1) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma class VAE(nn.Module): """ Variational Autoencoder """ def __init__(self, img_channels, latent_size): super(VAE, self).__init__() self.encoder = Encoder(img_channels, latent_size) self.decoder = Decoder(img_channels, latent_size) def forward(self, x): mu, logsigma = self.encoder(x) sigma = logsigma.exp() eps = torch.randn_like(sigma) z = eps.mul(sigma).add_(mu) recon_x = self.decoder(z) return recon_x, mu, logsigma def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) @triton.jit def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = 
tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (1024, 4), (4, 1)) assert_size_stride(primals_15, 
(1024,), (1,)) assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1)) assert_size_stride(primals_21, (32,), (1,)) assert_size_stride(primals_22, (32, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_23, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch .float32) triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_5[grid(131072, 25)](primals_16, buf5, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_16 buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_6[grid(8192, 25)](primals_18, buf6, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_18 buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch .float32) triton_poi_fused_7[grid(2048, 36)](primals_20, buf7, 2048, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_20 buf8 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32 ) triton_poi_fused_8[grid(128, 36)](primals_22, buf8, 128, 36, XBLOCK =32, YBLOCK=32, num_warps=4, num_stages=1) del primals_22 buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_9[grid(123008)](buf10, primals_2, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_10[grid(50176)](buf12, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128)) buf14 = buf13 del buf13 triton_poi_fused_convolution_relu_11[grid(18432)](buf14, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2), padding=(0, 
0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256)) buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. float32) buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_12[grid(1024, 4)]( buf15, primals_9, buf16, buf33, 1024, 4, XBLOCK=1, YBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf17) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18) del primals_13 buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_exp_mul_13[grid(16)](buf20, buf18, buf17, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1) buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0) del buf15 extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024), (1, 4), 0), out=buf22) buf23 = buf22 del buf22 buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_14[grid(4096)](buf23, primals_15, buf32, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding= (0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_15[grid(12800)](buf25, primals_17, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_16[grid(43264)](buf27, primals_19, 43264, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_17[grid(115200)](buf29, primals_21, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 64, 64), (16384, 1, 256, 4)) buf31 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_sigmoid_18[grid(16, 4096)](buf30, primals_23, buf31, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf30 del primals_23 return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), 
buf25, buf27, buf29, buf31, buf32, primals_14, primals_12, primals_10, buf33) class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.unsqueeze(-1).unsqueeze(-1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = F.sigmoid(self.deconv4(x)) return reconstruction class Encoder(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(Encoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(x.size(0), -1) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma class VAENew(nn.Module): """ Variational Autoencoder """ def __init__(self, img_channels, latent_size): super(VAENew, self).__init__() self.encoder = Encoder(img_channels, latent_size) self.decoder = Decoder(img_channels, latent_size) def forward(self, input_0): primals_1 = self.encoder.conv1.weight primals_2 = self.encoder.conv1.bias primals_4 = self.encoder.conv2.weight primals_5 = self.encoder.conv2.bias primals_6 = self.encoder.conv3.weight primals_7 = self.encoder.conv3.bias primals_8 = self.encoder.conv4.weight primals_9 = self.encoder.conv4.bias primals_10 = self.encoder.fc_mu.weight primals_11 = self.encoder.fc_mu.bias primals_12 = self.encoder.fc_logsigma.weight primals_13 = self.encoder.fc_logsigma.bias primals_14 = self.decoder.fc1.weight primals_15 = self.decoder.fc1.bias primals_16 = self.decoder.deconv1.weight primals_17 = self.decoder.deconv1.bias primals_18 = self.decoder.deconv2.weight primals_19 = self.decoder.deconv2.bias primals_20 = self.decoder.deconv3.weight primals_21 = self.decoder.deconv3.bias primals_22 = self.decoder.deconv4.weight primals_23 = self.decoder.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0], output[1], output[2]
FabianSchuetze/world-models
VAE
false
13708
[ "MIT" ]
440
d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
https://github.com/FabianSchuetze/world-models/tree/d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
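A usage sketch for the VAE record above, assuming CUDA. The reparameterization noise (torch.ops.aten.randn inside call()) makes the reconstruction stochastic across runs:

# Hypothetical smoke test for VAENew; shapes from get_inputs()/get_init_inputs() above.
import torch
vae = VAENew(img_channels=4, latent_size=4).cuda()
x = torch.rand(4, 4, 64, 64, device='cuda')
recon, mu, logsigma = vae(x)  # recon: (4, 4, 64, 64); mu, logsigma: (4, 4)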
FSM
import torch from torch import Tensor from torch import nn from torch.nn import functional as F class FSM(nn.Module): def __init__(self, c1, c2): super().__init__() self.conv_atten = nn.Conv2d(c1, c1, 1, bias=False) self.conv = nn.Conv2d(c1, c2, 1, bias=False) def forward(self, x: 'Tensor') ->Tensor: atten = self.conv_atten(F.avg_pool2d(x, x.shape[2:])).sigmoid() feat = torch.mul(x, atten) x = x + feat return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c1': 4, 'c2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_1, 
buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) return buf3, primals_1, primals_2, primals_3, buf0, buf1, buf2 class FSMNew(nn.Module): def __init__(self, c1, c2): super().__init__() self.conv_atten = nn.Conv2d(c1, c1, 1, bias=False) self.conv = nn.Conv2d(c1, c2, 1, bias=False) def forward(self, input_0): primals_2 = self.conv_atten.weight primals_3 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
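A hedged equivalence sketch (assuming a CUDA device is available; this check is not part of the record): FSMNew routes the eager module's weights through the compiled call(), so its output should match the eager FSM up to floating-point tolerance.

import torch

eager = FSM(4, 4).cuda()
fused = FSMNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())   # share the two 1x1 conv weights
x = torch.rand(4, 4, 4, 4, device='cuda')   # call() asserts this exact shape/stride
assert torch.allclose(eager(x), fused(x), atol=1e-5)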
Genevievekim/semantic-segmentation-1
FSM
false
13,709
[ "BSD-3-Clause" ]
196
f28b026e44cff80fe3ca4cac94cea27e4073821b
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
Quantization
import torch import torch.nn as nn import torch.utils.data class Quantization(nn.Module): @staticmethod def forward(input): return torch.round(input) @staticmethod def backward(grad_output): grad_input = grad_output.clone() return grad_input def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
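A clarifying sketch (an assumption added here, not part of the record): the backward() above is defined on an nn.Module, which autograd never calls, so as written the rounding has no custom gradient. The straight-through estimator the code appears to intend is normally expressed with torch.autograd.Function, roughly as follows.

import torch

class RoundSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        return torch.round(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: pass gradients through the non-differentiable round.
        return grad_output.clone()

x = torch.rand(4, 4, 4, 4, requires_grad=True)
RoundSTE.apply(x).sum().backward()
assert torch.equal(x.grad, torch.ones_like(x))  # identity gradient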
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.nearbyint(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_round_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class QuantizationNew(nn.Module): @staticmethod def backward(grad_output): grad_input = grad_output.clone() return grad_input def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
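A numerical note with a hedged check (the CUDA comparison is an assumption, not part of the record): torch.round rounds ties to even, and libdevice.nearbyint follows the default round-to-nearest-even mode, so the fused kernel should agree with the eager module.

import torch

ties = torch.tensor([0.5, 1.5, 2.5, 3.5])
assert torch.equal(torch.round(ties), torch.tensor([0.0, 2.0, 2.0, 4.0]))  # half to even

x = torch.rand(4, 4, 4, 4, device='cuda') * 4 - 2  # call() asserts this exact shape
assert torch.equal(QuantizationNew()(x), torch.round(x))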
Geunwoo-Jeon/iclr_17_compression
Quantization
false
13,710
[ "MIT" ]
56
a28746b1f1c518d91125d8f289d9511cde488c77
https://github.com/Geunwoo-Jeon/iclr_17_compression/tree/a28746b1f1c518d91125d8f289d9511cde488c77
UNet
import torch import torch.nn as nn import torch.nn.functional as F from torch.functional import F from torch.nn import functional as F class down(nn.Module): """ A class for creating neural network blocks containing layers: Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU This is used in the UNet class to create a UNet-like NN architecture. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block. """ def __init__(self, inChannels, outChannels, filterSize): """ Parameters ---------- inChannels : int number of input channels for the first convolutional layer. outChannels : int number of output channels for the first convolutional layer. This is also used as input and output channels for the second convolutional layer. filterSize : int filter size for the convolution filter. An input of N would create an N x N filter. """ super(down, self).__init__() self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2)) self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2)) def forward(self, x): """ Returns output tensor after passing input `x` to the neural network block. Parameters ---------- x : tensor input to the NN block. Returns ------- tensor output of the NN block. """ x = F.avg_pool2d(x, 2) x = F.leaky_relu(self.conv1(x), negative_slope=0.1) x = F.leaky_relu(self.conv2(x), negative_slope=0.1) return x class up(nn.Module): """ A class for creating neural network blocks containing layers: Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU This is used in the UNet class to create a UNet-like NN architecture. ... Methods ------- forward(x, skpCn) Returns output tensor after passing input `x` to the neural network block. """ def __init__(self, inChannels, outChannels): """ Parameters ---------- inChannels : int number of input channels for the first convolutional layer. outChannels : int number of output channels for the first convolutional layer. This is also used for setting input and output channels for the second convolutional layer. """ super(up, self).__init__() self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1) self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1) def forward(self, x, skpCn): """ Returns output tensor after passing input `x` to the neural network block. Parameters ---------- x : tensor input to the NN block. skpCn : tensor skip connection input to the NN block. Returns ------- tensor output of the NN block. """ x = F.interpolate(x, scale_factor=2, mode='bilinear') x = F.leaky_relu(self.conv1(x), negative_slope=0.1) x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope=0.1) return x class UNet(nn.Module): """ A class for creating a UNet-like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block. """ def __init__(self, inChannels, outChannels): """ Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet. 
""" super(UNet, self).__init__() self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3) self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3) self.down1 = down(32, 64, 5) self.down2 = down(64, 128, 3) self.down3 = down(128, 256, 3) self.down4 = down(256, 512, 3) self.down5 = down(512, 512, 3) self.up1 = up(512, 512) self.up2 = up(512, 256) self.up3 = up(256, 128) self.up4 = up(128, 64) self.up5 = up(64, 32) self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1) def forward(self, x): """ Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet. """ x = F.leaky_relu(self.conv1(x), negative_slope=0.1) s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1) s2 = self.down1(s1) s3 = self.down2(s2) s4 = self.down3(s3) s5 = self.down4(s4) x = self.down5(s5) x = self.up1(x, s5) x = self.up2(x, s4) x = self.up3(x, s3) x = self.up4(x, s2) x = self.up5(x, s1) x = F.leaky_relu(self.conv3(x), negative_slope=0.1) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'inChannels': 4, 'outChannels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F from torch.functional import F from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = triton_helpers.minimum(tmp10, tmp9) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 4 x0 = xindex % 4 x6 = xindex // 16 x2 = xindex // 16 % 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, 
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 1024 x0 = xindex % 16 x2 = xindex // 16384 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0).to(tl .int1) tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 1024, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 16 * (-512 + x1) + 8192 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 8 % 8 x0 = xindex % 8 x6 = xindex // 64 x2 = xindex // 64 % 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + 
x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 512 x0 = xindex % 64 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0).to( tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 7, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x6 = xindex // 256 x2 = xindex // 256 % 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) 
tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0).to( tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 15, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) 
tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x6 = xindex // 1024 x2 = xindex // 1024 % 128 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 128 x0 = xindex % 1024 x2 = xindex // 131072 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0 
).to(tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x6 = xindex // 4096 x2 = xindex // 4096 % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = 
tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 64 x0 = xindex % 4096 x2 = xindex // 262144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0 ).to(tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, 
primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47) = args args.clear() assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_29, (512,), (1,)) assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_33, (256,), (1,)) assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_41, (64,), (1,)) assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (32,), (1,)) assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_45, (32,), (1,)) assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_47, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0, primals_2, buf1, buf2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3, primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) triton_poi_fused_avg_pool2d_1[grid(131072)](buf5, buf6, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7, primals_7, buf8, buf9, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf12 = buf7 del buf7 triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf10, primals_9, buf11, buf12, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) triton_poi_fused_avg_pool2d_3[grid(65536)](buf12, buf13, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1)) buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf14, primals_11, buf15, buf16, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1)) buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf19 = buf14 del buf14 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf17, primals_13, buf18, buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_avg_pool2d_5[grid(32768)](buf19, buf20, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1)) buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21, primals_15, buf22, buf23, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf26 = buf21 del buf21 triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf24, primals_17, buf25, buf26, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) triton_poi_fused_avg_pool2d_7[grid(16384)](buf26, buf27, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1)) buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. float32) triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf28, primals_19, buf29, buf30, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_19 buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1)) buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) buf33 = buf28 del buf28 triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf31, primals_21, buf32, buf33, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch. float32) triton_poi_fused_avg_pool2d_9[grid(8192)](buf33, buf34, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1)) buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch. 
float32) triton_poi_fused_convolution_leaky_relu_10[grid(8192)](buf35, primals_23, buf36, buf37, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf35 del primals_23 buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1)) buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_11[grid(8192)](buf38, primals_25, buf39, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_12[grid(4)](buf40, 4, XBLOCK=4, num_warps =1, num_stages=1) buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_13[grid(4)](buf41, 4, XBLOCK=4, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_12[grid(4)](buf42, 4, XBLOCK=4, num_warps =1, num_stages=1) buf43 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_13[grid(4)](buf43, 4, XBLOCK=4, num_warps=1, num_stages=1) buf46 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf46, 4, XBLOCK=4, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf48, 4, XBLOCK=4, num_warps=1, num_stages=1) buf45 = buf31 del buf31 buf49 = buf45 del buf45 buf50 = buf49 del buf49 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[ grid(32768)](buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf38 del primals_25 buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1)) buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf51, primals_27, buf52, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0 ) del buf24 triton_poi_fused_cat_17[grid(65536)](buf52, buf51, primals_27, buf33, buf53, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf51 del primals_27 buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1)) buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf54, primals_29, buf55, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_18[grid(8)](buf56, 8, XBLOCK=8, num_warps =1, num_stages=1) buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_19[grid(8)](buf57, 8, XBLOCK=8, num_warps=1, num_stages=1) buf58 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_18[grid(8)](buf58, 8, XBLOCK=8, num_warps =1, num_stages=1) buf59 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_add_clamp_19[grid(8)](buf59, 8, XBLOCK=8, num_warps=1, num_stages=1) buf62 = empty_strided_cuda((8,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf62, 8, 
XBLOCK=8, num_warps=1, num_stages=1) buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf64, 8, XBLOCK=8, num_warps=1, num_stages=1) buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0) del buf17 buf65 = buf61 del buf61 buf66 = buf65 del buf65 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21[ grid(131072)](buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf54 del primals_29 buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1)) buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf67, primals_31, buf68, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused_cat_23[grid(131072)](buf68, buf67, primals_31, buf26, buf69, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf67 del primals_31 buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1)) buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf70, primals_33, buf71, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf72, 16, XBLOCK=16, num_warps=1, num_stages=1) buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf73, 16, XBLOCK=16, num_warps=1, num_stages=1) buf74 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf74, 16, XBLOCK=16, num_warps=1, num_stages=1) buf75 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf75, 16, XBLOCK=16, num_warps=1, num_stages=1) buf78 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf78, 16, XBLOCK=16, num_warps=1, num_stages=1) buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf80, 16, XBLOCK=16, num_warps=1, num_stages=1) buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0) del buf10 buf81 = buf77 del buf77 buf82 = buf81 del buf81 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[ grid(262144)](buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_33 buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf83, primals_35, buf84, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) triton_poi_fused_cat_29[grid(262144)](buf84, buf83, primals_35, buf19, buf85, 262144, XBLOCK=512, num_warps=8, 
num_stages=1) del buf83 del primals_35 buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1)) buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf86, primals_37, buf87, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_30[grid(32)](buf88, 32, XBLOCK=32, num_warps=1, num_stages=1) buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_31[grid(32)](buf89, 32, XBLOCK=32, num_warps=1, num_stages=1) buf90 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_30[grid(32)](buf90, 32, XBLOCK=32, num_warps=1, num_stages=1) buf91 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_31[grid(32)](buf91, 32, XBLOCK=32, num_warps=1, num_stages=1) buf94 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf94, 32, XBLOCK=32, num_warps=1, num_stages=1) buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf96, 32, XBLOCK=32, num_warps=1, num_stages=1) buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0) del buf3 buf97 = buf93 del buf93 buf98 = buf97 del buf97 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33[ grid(524288)](buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf86 del primals_37 buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf99, primals_39, buf100, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) triton_poi_fused_cat_35[grid(524288)](buf100, buf99, primals_39, buf12, buf101, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf99 del primals_39 buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf102, primals_41, buf103, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_36[grid(64)](buf104, 64, XBLOCK=64, num_warps=1, num_stages=1) buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_37[grid(64)](buf105, 64, XBLOCK=64, num_warps=1, num_stages=1) buf106 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_36[grid(64)](buf106, 64, XBLOCK=64, num_warps=1, num_stages=1) buf107 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_37[grid(64)](buf107, 64, XBLOCK=64, num_warps=1, num_stages=1) buf110 = empty_strided_cuda((64,), (1,), torch.float32) 
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf110, 64, XBLOCK=64, num_warps=1, num_stages=1) buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf112, 64, XBLOCK=64, num_warps=1, num_stages=1) buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) buf113 = buf109 del buf109 buf114 = buf113 del buf113 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39[ grid(1048576)](buf114, buf105, buf106, buf103, buf102, primals_41, buf104, buf107, buf110, buf112, 1048576, XBLOCK= 1024, num_warps=4, num_stages=1) del buf102 del primals_41 buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_40[grid(524288)](buf115, primals_43, buf116, 524288, XBLOCK=1024, num_warps=4, num_stages=1) buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_41[grid(1048576)](buf116, buf115, primals_43, buf5, buf117, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf120 = buf115 del buf115 triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf118, primals_45, buf119, buf120, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf118 del primals_45 buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool) buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64, 1), 0) del buf70 triton_poi_fused_convolution_leaky_relu_42[grid(65536)](buf121, primals_47, buf122, buf123, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf121 del primals_47 return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30, buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42, buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57, buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72, buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87, buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101, buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114, buf116, buf117, buf119, buf120, buf122) class down(nn.Module): """ A class for creating neural network blocks containing layers: Average Pooling --> Convlution + Leaky ReLU --> Convolution + Leaky ReLU This is used in the UNet Class to create a UNet like NN architecture. ... 
Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block. """ def __init__(self, inChannels, outChannels, filterSize): """ Parameters ---------- inChannels : int number of input channels for the first convolutional layer. outChannels : int number of output channels for the first convolutional layer. This is also used as input and output channels for the second convolutional layer. filterSize : int filter size for the convolution filter. input N would create a N x N filter. """ super(down, self).__init__() self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride= 1, padding=int((filterSize - 1) / 2)) self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride =1, padding=int((filterSize - 1) / 2)) def forward(self, x): """ Returns output tensor after passing input `x` to the neural network block. Parameters ---------- x : tensor input to the NN block. Returns ------- tensor output of the NN block. """ x = F.avg_pool2d(x, 2) x = F.leaky_relu(self.conv1(x), negative_slope=0.1) x = F.leaky_relu(self.conv2(x), negative_slope=0.1) return x class up(nn.Module): """ A class for creating neural network blocks containing layers: Bilinear interpolation --> Convlution + Leaky ReLU --> Convolution + Leaky ReLU This is used in the UNet Class to create a UNet like NN architecture. ... Methods ------- forward(x, skpCn) Returns output tensor after passing input `x` to the neural network block. """ def __init__(self, inChannels, outChannels): """ Parameters ---------- inChannels : int number of input channels for the first convolutional layer. outChannels : int number of output channels for the first convolutional layer. This is also used for setting input and output channels for the second convolutional layer. """ super(up, self).__init__() self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1) self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1) def forward(self, x, skpCn): """ Returns output tensor after passing input `x` to the neural network block. Parameters ---------- x : tensor input to the NN block. skpCn : tensor skip connection input to the NN block. Returns ------- tensor output of the NN block. """ x = F.interpolate(x, scale_factor=2, mode='bilinear') x = F.leaky_relu(self.conv1(x), negative_slope=0.1) x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope=0.1) return x class UNetNew(nn.Module): """ A class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block. """ def __init__(self, inChannels, outChannels): """ Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet. 
""" super(UNetNew, self).__init__() self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3) self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3) self.down1 = down(32, 64, 5) self.down2 = down(64, 128, 3) self.down3 = down(128, 256, 3) self.down4 = down(256, 512, 3) self.down5 = down(512, 512, 3) self.up1 = up(512, 512) self.up2 = up(512, 256) self.up3 = up(256, 128) self.up4 = up(128, 64) self.up5 = up(64, 32) self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.down1.conv1.weight primals_7 = self.down1.conv1.bias primals_8 = self.down1.conv2.weight primals_9 = self.down1.conv2.bias primals_10 = self.down2.conv1.weight primals_11 = self.down2.conv1.bias primals_12 = self.down2.conv2.weight primals_13 = self.down2.conv2.bias primals_14 = self.down3.conv1.weight primals_15 = self.down3.conv1.bias primals_16 = self.down3.conv2.weight primals_17 = self.down3.conv2.bias primals_18 = self.down4.conv1.weight primals_19 = self.down4.conv1.bias primals_20 = self.down4.conv2.weight primals_21 = self.down4.conv2.bias primals_22 = self.down5.conv1.weight primals_23 = self.down5.conv1.bias primals_24 = self.down5.conv2.weight primals_25 = self.down5.conv2.bias primals_26 = self.up1.conv1.weight primals_27 = self.up1.conv1.bias primals_28 = self.up1.conv2.weight primals_29 = self.up1.conv2.bias primals_30 = self.up2.conv1.weight primals_31 = self.up2.conv1.bias primals_32 = self.up2.conv2.weight primals_33 = self.up2.conv2.bias primals_34 = self.up3.conv1.weight primals_35 = self.up3.conv1.bias primals_36 = self.up3.conv2.weight primals_37 = self.up3.conv2.bias primals_38 = self.up4.conv1.weight primals_39 = self.up4.conv1.bias primals_40 = self.up4.conv2.weight primals_41 = self.up4.conv2.bias primals_42 = self.up5.conv1.weight primals_43 = self.up5.conv1.bias primals_44 = self.up5.conv2.weight primals_45 = self.up5.conv2.bias primals_46 = self.conv3.weight primals_47 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47]) return output[0]
CM-BF/FeatureFlow
UNet
false
13711
[ "MIT" ]
161
06642697922f17211e5faa353e24b1a0946885b1
https://github.com/CM-BF/FeatureFlow/tree/06642697922f17211e5faa353e24b1a0946885b1
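A quick smoke test for the UNet record above (a sketch, not part of the dataset): UNetNew comes from the record itself, while the CUDA device, the keyword names, and the batch-of-4, 64x64 input are assumptions read off the buffer shapes hard-coded into call (e.g. buf123 is (4, 4, 64, 64)).

import torch

net = UNetNew(inChannels=4, outChannels=4).cuda()  # shapes must match call's assert_size_stride checks
x = torch.rand(4, 4, 64, 64, device='cuda')        # five 2x down blocks take 64 -> 2; five up blocks restore 64
out = net(x)
assert out.shape == (4, 4, 64, 64)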
PA
import torch from torch import nn class PA(nn.Module): def __init__(self, dim): super().__init__() self.pa_conv = nn.Conv2d(dim, dim, 3, 1, 1, groups=dim) def forward(self, x): return x * self.pa_conv(x).sigmoid() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_mul_sigmoid_0[grid(256)](buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, buf1 class PANew(nn.Module): def __init__(self, dim): super().__init__() self.pa_conv = nn.Conv2d(dim, dim, 3, 1, 1, groups=dim) def forward(self, input_0): primals_1 = self.pa_conv.weight primals_2 = self.pa_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Genevievekim/semantic-segmentation-1
PA
false
13712
[ "BSD-3-Clause" ]
196
f28b026e44cff80fe3ca4cac94cea27e4073821b
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
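For reference, the math fused into triton_poi_fused_convolution_mul_sigmoid_0 in the PA record above, restated eagerly (a sketch with made-up tensors; only the depthwise conv runs through extern_kernels.convolution, while the bias add, sigmoid, and elementwise product share one kernel):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)                    # hypothetical input matching get_inputs()
w, b = torch.rand(4, 1, 3, 3), torch.rand(4)  # depthwise weights: groups == channels
y = x * torch.sigmoid(F.conv2d(x, w, b, padding=1, groups=4))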
BasicBlock_AP
import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock_AP(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, norm='instancenorm'): super(BasicBlock_AP, self).__init__() self.norm = norm self.stride = stride self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.GroupNorm(planes, planes, affine=True ) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.GroupNorm(planes, planes, affine=True ) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self. expansion * planes, kernel_size=1, stride=1, bias=False), nn.AvgPool2d(kernel_size=2, stride=2), nn.GroupNorm(self. expansion * planes, self.expansion * planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self. expansion * planes)) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) if self.stride != 1: out = F.avg_pool2d(out, kernel_size=2, stride=2) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp32 = 0.0 tmp33 = tmp31 <= tmp32 tl.store(out_ptr2 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr3 
+ (r1 + 16 * x0), tmp33, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_0[grid(16)](buf0, primals_3, primals_4, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_4 buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid (16)](buf6, primals_6, primals_7, primals_2, buf7, buf11, buf12, buf10, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_7 return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor( buf10, (4, 4), (4, 1), 0), buf12) class BasicBlock_APNew(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, norm='instancenorm'): super(BasicBlock_APNew, self).__init__() self.norm = norm self.stride = stride self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.GroupNorm(planes, planes, affine=True ) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.GroupNorm(planes, planes, affine=True ) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self. expansion * planes, kernel_size=1, stride=1, bias=False), nn.AvgPool2d(kernel_size=2, stride=2), nn.GroupNorm(self. expansion * planes, self.expansion * planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self. expansion * planes)) def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.bn1.weight primals_4 = self.bn1.bias primals_5 = self.conv2.weight primals_6 = self.bn2.weight primals_7 = self.bn2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
GeorgeCazenavette/mtt-distillation
BasicBlock_AP
false
13713
[ "MIT" ]
105
e13a65980183fbc33238ca6cbb6cfec819018e2d
https://github.com/GeorgeCazenavette/mtt-distillation/tree/e13a65980183fbc33238ca6cbb6cfec819018e2d
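The reduction in triton_per_fused_native_group_norm_relu_0 above is GroupNorm with one channel per group, i.e. affine instance norm: a mean and a biased variance over each 4x4 spatial map, then scale, shift, and ReLU. An eager restatement (a sketch with made-up weights; the second kernel additionally adds the residual before the ReLU):

import torch

x, w, b = torch.rand(4, 4, 4, 4), torch.rand(4), torch.rand(4)
mu = x.mean(dim=(2, 3), keepdim=True)                  # tmp10 in the kernel
var = x.var(dim=(2, 3), unbiased=False, keepdim=True)  # tmp16 / 16.0
y = torch.relu((x - mu) * torch.rsqrt(var + 1e-05) * w[:, None, None] + b[:, None, None])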
SqueezeExcitation
import torch from torch import Tensor from typing import Optional from torch import nn from torch.nn import functional as F def _make_divisible(v: 'float', divisor: 'int', min_value: 'Optional[int]'=None ) ->int: """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class SqueezeExcitation(nn.Module): def __init__(self, ch, squeeze_factor=4): super().__init__() squeeze_ch = _make_divisible(ch // squeeze_factor, 8) self.fc1 = nn.Conv2d(ch, squeeze_ch, 1) self.relu = nn.ReLU(True) self.fc2 = nn.Conv2d(squeeze_ch, ch, 1) def _scale(self, x: 'Tensor') ->Tensor: scale = F.adaptive_avg_pool2d(x, 1) scale = self.fc2(self.relu(self.fc1(scale))) return F.hardsigmoid(scale, True) def forward(self, x: 'Tensor') ->Tensor: scale = self._scale(x) return scale * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import Tensor from typing import Optional from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 16 x1 = xindex // 16 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + x4, xmask) tmp2 = tmp0 + tmp1 tmp3 = 3.0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 6.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tmp9 = 0.16666666666666666 tmp10 = tmp8 * tmp9 tmp12 = tmp10 * tmp11 tl.store(out_ptr0 + x4, tmp12, xmask) @triton.jit def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -3.0 tmp4 = tmp2 > tmp3 tmp5 = 3.0 tmp6 = tmp2 < tmp5 tmp7 = tmp4 & tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 
16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_hardsigmoid_mul_2[grid(256)](buf4, primals_5, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_convolution_hardsigmoid_backward_3[grid(16)](buf4, primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del primals_5 return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6 def _make_divisible(v: 'float', divisor: 'int', min_value: 'Optional[int]'=None ) ->int: """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class SqueezeExcitationNew(nn.Module): def __init__(self, ch, squeeze_factor=4): super().__init__() squeeze_ch = _make_divisible(ch // squeeze_factor, 8) self.fc1 = nn.Conv2d(ch, squeeze_ch, 1) self.relu = nn.ReLU(True) self.fc2 = nn.Conv2d(squeeze_ch, ch, 1) def _scale(self, x: 'Tensor') ->Tensor: scale = F.adaptive_avg_pool2d(x, 1) scale = self.fc2(self.relu(self.fc1(scale))) return F.hardsigmoid(scale, True) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Genevievekim/semantic-segmentation-1
SqueezeExcitation
false
13714
[ "BSD-3-Clause" ]
196
f28b026e44cff80fe3ca4cac94cea27e4073821b
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
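triton_poi_fused_convolution_hardsigmoid_mul_2 above folds the fc2 bias add, the hard-sigmoid, and the broadcast multiply into one kernel; the 0.16666666666666666 constant is the 1/6 of relu6(s + 3) / 6. Eagerly (a sketch with made-up tensors):

import torch

s = torch.rand(4, 4, 1, 1)  # pooled and excited scale logits (fc2 output plus bias)
x = torch.rand(4, 4, 4, 4)
out = (torch.clamp(s + 3.0, 0.0, 6.0) / 6.0) * x  # broadcasts the (N, C, 1, 1) scale over H, W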
GlobalAttention
import torch import torch.nn as nn import torch.utils.data import torch.cuda import torch.optim def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0] * size[1], -1)) return out.contiguous().view(size[0], size[1], -1) class BottleLinear(Bottle, nn.Linear): pass class GlobalAttention(nn.Module): """ Luong Attention. Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. H_1 H_2 H_3 ... H_n q q q q | | | | \\ | | / ..... \\ | / a Constructs a unit mapping. $$(H_1 + H_n, q) => (a)$$ Where H is of `batch x n x dim` and q is of `batch x dim`. Luong Attention (dot, general): The full function is $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$. * dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$ * general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$ Bahdanau Attention (mlp): $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$. The Alignment-function $$a$$ computes an alignment as: $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j) )$$. """ def __init__(self, dim, coverage=False, attn_type='dot'): super(GlobalAttention, self).__init__() self.dim = dim self.attn_type = attn_type assert self.attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type.' if self.attn_type == 'general': self.linear_in = nn.Linear(dim, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = BottleLinear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) self.sm = nn.Softmax() self.tanh = nn.Tanh() self.mask = None if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def applyMask(self, mask): self.mask = mask def score(self, h_t, h_s): """ h_t (FloatTensor): batch x tgt_len x dim h_s (FloatTensor): batch x src_len x dim returns scores (FloatTensor): batch x tgt_len x src_len: raw attention scores for each src index """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_type == 'general': h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim) h_t_ = self.linear_in(h_t_) h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = self.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input, context, coverage=None): """ input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states coverage (FloatTensor): None (not supported yet) """ if input.dim() == 2: one_step = True input = input.unsqueeze(1) else: one_step = False batch, sourceL, dim = context.size() batch_, targetL, dim_ = input.size() aeq(batch, batch_) aeq(dim, dim_) aeq(self.dim, dim) if coverage is not None: batch_, sourceL_ = coverage.size() aeq(batch, batch_) aeq(sourceL, sourceL_) if self.mask is not None: beam_, batch_, sourceL_ = self.mask.size() aeq(batch, batch_ * beam_) aeq(sourceL, sourceL_) if coverage is not None: cover = coverage.view(-1).unsqueeze(1) context += self.linear_cover(cover).view_as(context) context = self.tanh(context) align = self.score(input, context) if self.mask is not None: mask_ = self.mask.view(batch, 1, sourceL) align.data.masked_fill_(mask_, -float('inf')) align_vectors = self.sm(align.view(batch * targetL, sourceL)) align_vectors = align_vectors.view(batch, targetL, sourceL) c = torch.bmm(align_vectors, context) concat_c = torch.cat([c, input], 2).view(batch * targetL, dim * 2) attn_h = self.linear_out(concat_c).view(batch, targetL, dim) if self.attn_type in ['general', 'dot']: attn_h = self.tanh(attn_h) if one_step: attn_h = attn_h.squeeze(1) align_vectors = align_vectors.squeeze(1) batch_, dim_ = attn_h.size() aeq(batch, batch_) aeq(dim, dim_) batch_, sourceL_ = align_vectors.size() aeq(batch, batch_) aeq(sourceL, sourceL_) else: attn_h = attn_h.transpose(0, 1).contiguous() align_vectors = align_vectors.transpose(0, 1).contiguous() targetL_, batch_, dim_ = attn_h.size() aeq(targetL, targetL_) aeq(batch, batch_) aeq(dim, dim_) targetL_, batch_, sourceL_ = align_vectors.size() aeq(targetL, targetL_) aeq(batch, batch_) aeq(sourceL, sourceL_) return attn_h, align_vectors def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data import torch.cuda import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x3, tmp1, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5 def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0] * size[1], -1)) return out.contiguous().view(size[0], size[1], -1) class BottleLinear(Bottle, 
nn.Linear): pass class GlobalAttentionNew(nn.Module): """ Luong Attention. Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. H_1 H_2 H_3 ... H_n q q q q | | | | \\ | | / ..... \\ | / a Constructs a unit mapping. $$(H_1 + H_n, q) => (a)$$ Where H is of `batch x n x dim` and q is of `batch x dim`. Luong Attention (dot, general): The full function is $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$. * dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$ * general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$ Bahdanau Attention (mlp): $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$. The Alignment-function $$a$$ computes an alignment as: $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j) )$$. """ def __init__(self, dim, coverage=False, attn_type='dot'): super(GlobalAttentionNew, self).__init__() self.dim = dim self.attn_type = attn_type assert self.attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type.' if self.attn_type == 'general': self.linear_in = nn.Linear(dim, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = BottleLinear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) self.sm = nn.Softmax() self.tanh = nn.Tanh() self.mask = None if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def applyMask(self, mask): self.mask = mask def score(self, h_t, h_s): """ h_t (FloatTensor): batch x tgt_len x dim h_s (FloatTensor): batch x src_len x dim returns scores (FloatTensor): batch x tgt_len x src_len: raw attention scores for each src index """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_type == 'general': h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim) h_t_ = self.linear_in(h_t_) h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = self.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input_0, input_1): primals_3 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
Flamexmt/LMA
GlobalAttention
false
13715
[ "MIT" ]
321
f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
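The generated call above covers only the attn_type='dot' path with no mask and no coverage. Eagerly it is bmm -> softmax -> bmm -> concat -> bias-free linear -> tanh -> transpose (a sketch; W_out is a stand-in for linear_out.weight):

import torch

q, ctx = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
W_out = torch.rand(4, 8)
align = torch.softmax(torch.bmm(q, ctx.transpose(1, 2)), dim=-1)  # extern bmm + the two softmax kernels
c = torch.bmm(align, ctx)                                         # second extern bmm
attn_h = torch.tanh(torch.cat([c, q], dim=2) @ W_out.t())         # cat kernel, extern mm, tanh in clone_3
attn_h, align = attn_h.transpose(0, 1), align.transpose(0, 1)     # the two clone kernels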
IdfCombination
import torch from torch import nn class IdfCombination(nn.Module): def forward(self, scores, idf): idf = idf.softmax(dim=1) return (scores * idf).sum(dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp1 / tmp7 tmp9 = tmp0 * tmp8 tmp11 = tmp2 / tmp7 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp15 = tmp4 / tmp7 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp19 = tmp6 / tmp7 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mul_sum_1[grid(64)](arg1_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 del buf0 return buf1, class IdfCombinationNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Georgetown-IR-Lab/OpenNIR
IdfCombination
false
13,716
[ "MIT" ]
140
7d93e8643fe311e3e9c7a0678efe9775fd80485e
https://github.com/Georgetown-IR-Lab/OpenNIR/tree/7d93e8643fe311e3e9c7a0678efe9775fd80485e
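The two kernels in the IdfCombination record above are a numerically stable softmax split in half: kernel 0 stores exp(x - max) along dim=1, kernel 1 normalizes, weights the scores, and reduces. Eager equivalent (a sketch with made-up tensors):

import torch

scores, idf = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
e = (idf - idf.amax(dim=1, keepdim=True)).exp()               # kernel 0
out = (scores * (e / e.sum(dim=1, keepdim=True))).sum(dim=1)  # kernel 1
assert torch.allclose(out, (scores * idf.softmax(dim=1)).sum(dim=1))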
MLP
import torch from torch import Tensor from torch import nn class MLP(nn.Module): def __init__(self, dim, embed_dim): super().__init__() self.proj = nn.Linear(dim, embed_dim) def forward(self, x: 'Tensor') ->Tensor: x = x.flatten(2).transpose(1, 2) x = self.proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'embed_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class MLPNew(nn.Module): def __init__(self, dim, embed_dim): super().__init__() self.proj = nn.Linear(dim, embed_dim) def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Genevievekim/semantic-segmentation-1
MLP
false
13,717
[ "BSD-3-Clause" ]
196
f28b026e44cff80fe3ca4cac94cea27e4073821b
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
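triton_poi_fused_clone_0 in the MLP record above only changes layout: NCHW becomes (N, H*W, C) tokens so the projection is a single (N*H*W, C) x (C, E) matmul, with the bias added by the second kernel. Eagerly (a sketch with made-up weights):

import torch

x = torch.rand(4, 4, 4, 4)
W, b = torch.rand(4, 4), torch.rand(4)
tokens = x.flatten(2).transpose(1, 2).contiguous()  # (4, 16, 4), the contents of buf0
y = tokens @ W.t() + b                              # extern mm + triton_poi_fused_add_1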
GEGLU
import torch from torch import nn import torch.nn.functional as F class GEGLU(nn.Module): def forward(self, x): x, gates = x.chunk(2, dim=-1) return x * F.gelu(gates) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865476 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tmp10 = tmp0 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GEGLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Gitsamshi/DALLE-pytorch
GEGLU
false
13,718
[ "MIT" ]
4,025
6cfc43158a4615865e97c839133290afcf289824
https://github.com/Gitsamshi/DALLE-pytorch/tree/6cfc43158a4615865e97c839133290afcf289824
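A minimal smoke test for the GEGLU record, assuming a CUDA device; the fused kernel uses the erf-based GELU, which matches F.gelu's default:
import torch
import torch.nn.functional as F
# The gate halves the last dimension: (4, 4, 4, 4) -> (4, 4, 4, 2).
geglu = GEGLUNew().cuda()
inp = torch.rand(4, 4, 4, 4, device='cuda')
out = geglu(inp)
a, gates = inp.chunk(2, dim=-1)
assert out.shape == (4, 4, 4, 2)
torch.testing.assert_close(out, a * F.gelu(gates), rtol=1e-5, atol=1e-6)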
DivideMax
import torch from torch import nn class DivideMax(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): maxes = x.amax(dim=self.dim, keepdim=True).detach() return x / maxes def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_amax_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class DivideMaxNew(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Gitsamshi/DALLE-pytorch
DivideMax
false
13,719
[ "MIT" ]
4,025
6cfc43158a4615865e97c839133290afcf289824
https://github.com/Gitsamshi/DALLE-pytorch/tree/6cfc43158a4615865e97c839133290afcf289824
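A minimal smoke test for the DivideMax record, assuming a CUDA device:
import torch
# Division by the detached per-slice maximum along the last axis.
dm = DivideMaxNew(dim=4).cuda()
x = torch.rand(4, 4, 4, 4, 4, device='cuda')
out = dm(x)
torch.testing.assert_close(out, x / x.amax(dim=4, keepdim=True))
Note that the compiled kernel hardcodes the reduction over the innermost axis (stride-1 loads in groups of four), so this wrapper is only equivalent to the eager module for dim=4 on a (4, 4, 4, 4, 4) input.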
SumCombination
import torch from torch import nn class SumCombination(nn.Module): def __init__(self, dim_in, normalize=True): super(SumCombination, self).__init__() self.conv = nn.Conv1d(dim_in, 1, 1) self.normalize = normalize def forward(self, x, qlen): scores = self.conv(x.permute(0, 2, 1))[:, :, 0] if self.normalize: scores = scores.sum(dim=1) / qlen.type_as(scores) return scores def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_div_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp0 + tmp2 tmp5 = tmp3 / tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4), (4, 4, 1)) buf2 = buf0 del buf0 triton_poi_fused_div_sum_1[grid(64)](buf1, primals_3, primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_3 return buf2, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0) class SumCombinationNew(nn.Module): def __init__(self, dim_in, normalize=True): super(SumCombinationNew, self).__init__() self.conv = nn.Conv1d(dim_in, 1, 1) self.normalize = normalize def forward(self, input_0, input_1): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Georgetown-IR-Lab/OpenNIR
SumCombination
false
13,720
[ "MIT" ]
140
7d93e8643fe311e3e9c7a0678efe9775fd80485e
https://github.com/Georgetown-IR-Lab/OpenNIR/tree/7d93e8643fe311e3e9c7a0678efe9775fd80485e
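A minimal smoke test for the SumCombination record, assuming a CUDA device and the two (4, 4, 4) inputs from get_inputs(); the eager path is reconstructed from the module's own conv parameters:
import torch
# Conv1d scoring over the permuted input, then the broadcasted division by qlen.
sc = SumCombinationNew(dim_in=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
qlen = torch.rand(4, 4, 4, device='cuda')
out = sc(x, qlen)
scores = sc.conv(x.permute(0, 2, 1))[:, :, 0].sum(dim=1)   # eager path, shape (4,)
torch.testing.assert_close(out, scores / qlen)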
MaxPooling
import torch import torch.nn as nn class MaxPooling(nn.Module): def __init__(self): super(MaxPooling, self).__init__() self.MIN = -1000000.0 """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, x, x_mask): """ x_output: num_items x input_size x 1 --> num_items x input_size """ empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x) x_now = x.clone() x_now.data.masked_fill_(empty_mask.data, self.MIN) x_output = x_now.max(1)[0] x_output.data.masked_fill_(x_output.data.eq(self.MIN), 0) return x_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_masked_fill_max_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = -1000000.0 tmp5 = tl.where(tmp2, tmp4, tmp3) tmp7 = tmp6 == tmp1 tmp9 = tl.where(tmp7, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp12 = tmp11 == tmp1 tmp14 = tl.where(tmp12, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp17 = tmp16 == tmp1 tmp19 = tl.where(tmp17, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp20 == tmp4 tmp22 = tl.where(tmp21, tmp1, tmp20) tl.store(in_out_ptr0 + x2, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_eq_masked_fill_max_0[grid(16)](buf1, arg0_1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class MaxPoolingNew(nn.Module): def __init__(self): super(MaxPoolingNew, self).__init__() self.MIN = -1000000.0 """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
GingerNg/SDNet
MaxPooling
false
13,721
[ "MIT" ]
112
48ad8cc57c9a02aaad10e34d0c91a174ac68f056
https://github.com/GingerNg/SDNet/tree/48ad8cc57c9a02aaad10e34d0c91a174ac68f056
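A minimal smoke test for the MaxPooling record, assuming a CUDA device and a binary mask; empty positions are filled with MIN before the max and mapped back to zero afterwards, as in the eager module:
import torch
mp = MaxPoolingNew().cuda()
x = torch.rand(4, 4, 4, device='cuda')
mask = torch.randint(0, 2, (4, 4), device='cuda').float()
out = mp(x, mask)
ref = x.masked_fill(mask.eq(0).unsqueeze(2).expand_as(x), -1000000.0).max(1)[0]
ref = ref.masked_fill(ref.eq(-1000000.0), 0.0)
torch.testing.assert_close(out, ref)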
LinearBlock
import torch from scipy.stats import truncnorm def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class LinearBlock(torch.nn.Module): def __init__(self, input_size, output_size): super(LinearBlock, self).__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, x): x = self.linear(x) x = self.normalize(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from scipy.stats import truncnorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 0.001 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0 , in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 0.001 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = 0.0 tmp17 = tmp15 <= tmp16 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf4 = empty_strided_cuda((1, 4, 1), (4, 1, 1), torch.float32) get_raw_stream(0) 
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf0, buf1, buf2, buf4, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1[ grid(64)](buf0, buf1, buf2, primals_4, primals_5, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del primals_5 return buf5, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf4, (4,), (1,), 0 ), buf6, reinterpret_tensor(buf1, (1, 4, 1), (4, 1, 1), 0) def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class LinearBlockNew(torch.nn.Module): def __init__(self, input_size, output_size): super(LinearBlockNew, self).__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, input_0): primals_2 = self.normalize.weight primals_4 = self.normalize.bias primals_1 = self.linear.weight primals_5 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Brikwerk/learn2learn
LinearBlock
false
13,722
[ "MIT" ]
1,774
7997c13c26ec627d13ce77ba98427260df78ada8
https://github.com/Brikwerk/learn2learn/tree/7997c13c26ec627d13ce77ba98427260df78ada8
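A minimal smoke test for the LinearBlock record, assuming a CUDA device; since track_running_stats=False, the batch norm always uses batch statistics, so the eager path through the module's own submodules is a valid reference:
import torch
# linear -> batch norm (batch statistics, eps=0.001) -> ReLU, with identical weights.
lb = LinearBlockNew(input_size=4, output_size=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
out = lb(x)
ref = lb.relu(lb.normalize(lb.linear(x)))
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-5)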
SumAggregator
import torch import torch.nn as nn class SumAggregator(nn.Module): def __init__(self): super(SumAggregator, self).__init__() def forward(self, neighbor): return torch.sum(neighbor, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class SumAggregatorNew(nn.Module): def __init__(self): super(SumAggregatorNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
GraphNAS/GraphNAS
SumAggregator
false
13,723
[ "Apache-2.0" ]
94
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
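A minimal smoke test for the SumAggregator record, assuming a CUDA device:
import torch
# Plain sum over dim 1: (4, 4, 4, 4) -> (4, 4, 4).
agg = SumAggregatorNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(agg(x), x.sum(dim=1))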
GDN
from torch.autograd import Function import torch import torch.nn as nn import torch.utils.data class LowerBound(Function): @staticmethod def forward(ctx, inputs, bound): b = torch.ones_like(inputs) * bound ctx.save_for_backward(inputs, b) return torch.max(inputs, b) @staticmethod def backward(ctx, grad_output): inputs, b = ctx.saved_tensors pass_through_1 = inputs >= b pass_through_2 = grad_output < 0 pass_through = pass_through_1 | pass_through_2 return pass_through.type(grad_output.dtype) * grad_output, None class GDN(nn.Module): """Generalized divisive normalization layer. y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j])) """ def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1, reparam_offset=2 ** -18): super(GDN, self).__init__() self.inverse = inverse self.beta_min = beta_min self.gamma_init = gamma_init self.reparam_offset = reparam_offset self.build(ch) def build(self, ch): self.pedestal = self.reparam_offset ** 2 self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5 self.gamma_bound = self.reparam_offset beta = torch.sqrt(torch.ones(ch) + self.pedestal) self.beta = nn.Parameter(beta) eye = torch.eye(ch) g = self.gamma_init * eye g = g + self.pedestal gamma = torch.sqrt(g) self.gamma = nn.Parameter(gamma) self.pedestal = self.pedestal def forward(self, inputs): unfold = False if inputs.dim() == 5: unfold = True bs, ch, d, w, h = inputs.size() inputs = inputs.view(bs, ch, d * w, h) _, ch, _, _ = inputs.size() beta = LowerBound.apply(self.beta, self.beta_bound) beta = beta ** 2 - self.pedestal gamma = LowerBound.apply(self.gamma, self.gamma_bound) gamma = gamma ** 2 - self.pedestal gamma = gamma.view(ch, ch, 1, 1) norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta) norm_ = torch.sqrt(norm_) if self.inverse: outputs = inputs * norm_ else: outputs = inputs / norm_ if unfold: outputs = outputs.view(bs, ch, d, w, h) return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_maximum_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.814697265625e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 * tmp2 tmp4 = 1.4551915228366852e-11 tmp5 = tmp3 - tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_convolution_maximum_mul_pow_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0010000072652474046 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 * tmp2 tmp4 = 1.4551915228366852e-11 tmp5 = tmp3 - tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = libdevice.sqrt(tmp2) tmp5 = tmp3 / tmp4 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_maximum_mul_pow_sub_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_pow_1[grid(256)](primals_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_convolution_maximum_mul_pow_sub_2[grid(4)](primals_2, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(buf1, reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3[grid(256)]( buf4, buf2, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf5, primals_1, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf1, buf4 class LowerBound(Function): @staticmethod def forward(ctx, inputs, bound): b = torch.ones_like(inputs) * bound ctx.save_for_backward(inputs, b) return torch.max(inputs, b) @staticmethod def backward(ctx, grad_output): inputs, b = ctx.saved_tensors pass_through_1 = inputs >= b pass_through_2 = grad_output < 0 pass_through = pass_through_1 | pass_through_2 return pass_through.type(grad_output.dtype) * grad_output, None class GDNNew(nn.Module): """Generalized divisive normalization layer. y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j])) """ def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1, reparam_offset=2 ** -18): super(GDNNew, self).__init__() self.inverse = inverse self.beta_min = beta_min self.gamma_init = gamma_init self.reparam_offset = reparam_offset self.build(ch) def build(self, ch): self.pedestal = self.reparam_offset ** 2 self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5 self.gamma_bound = self.reparam_offset beta = torch.sqrt(torch.ones(ch) + self.pedestal) self.beta = nn.Parameter(beta) eye = torch.eye(ch) g = self.gamma_init * eye g = g + self.pedestal gamma = torch.sqrt(g) self.gamma = nn.Parameter(gamma) self.pedestal = self.pedestal def forward(self, input_0): primals_2 = self.beta primals_3 = self.gamma primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Geunwoo-Jeon/iclr_17_compression
GDN
false
13,724
[ "MIT" ]
56
a28746b1f1c518d91125d8f289d9511cde488c77
https://github.com/Geunwoo-Jeon/iclr_17_compression/tree/a28746b1f1c518d91125d8f289d9511cde488c77
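A minimal smoke test for the GDN record, assuming a CUDA device; LowerBound.forward is max(input, bound), i.e. a clamp, so the eager reference can be rebuilt with torch.clamp on the module's own parameters:
import torch
import torch.nn.functional as F
gdn = GDNNew(ch=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = gdn(x)
beta = torch.clamp(gdn.beta, min=gdn.beta_bound) ** 2 - gdn.pedestal
gamma = (torch.clamp(gdn.gamma, min=gdn.gamma_bound) ** 2 - gdn.pedestal).view(4, 4, 1, 1)
norm = torch.sqrt(F.conv2d(x ** 2, gamma, beta))
torch.testing.assert_close(out, x / norm, rtol=1e-4, atol=1e-5)
The constants baked into the kernels (3.814697265625e-06, 0.0010000072652474046, 1.4551915228366852e-11) are exactly gamma_bound, beta_bound and pedestal for reparam_offset = 2 ** -18.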
BitEstimator
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Bitparm(nn.Module): """ save params """ def __init__(self, channel, final=False): super(Bitparm, self).__init__() self.final = final self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel). view(1, -1, 1, 1), 0, 0.01)) self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel). view(1, -1, 1, 1), 0, 0.01)) if not final: self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel ).view(1, -1, 1, 1), 0, 0.01)) else: self.a = None def forward(self, x): if self.final: return torch.sigmoid(x * F.softplus(self.h) + self.b) else: x = x * F.softplus(self.h) + self.b return x + torch.tanh(x) * torch.tanh(self.a) class BitEstimator(nn.Module): """ Estimate bit """ def __init__(self, channel): super(BitEstimator, self).__init__() self.f1 = Bitparm(channel) self.f2 = Bitparm(channel) self.f3 = Bitparm(channel) self.f4 = Bitparm(channel, True) def forward(self, x): x = self.f1(x) x = self.f2(x) x = self.f3(x) return self.f4(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_sigmoid_softplus_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last') tmp2 = 20.0 tmp3 = tmp1 > tmp2 tmp4 = tl_math.exp(tmp1) tmp5 = libdevice.log1p(tmp4) tmp6 = tl.where(tmp3, tmp1, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp12 = libdevice.tanh(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 > tmp2 tmp17 = tl_math.exp(tmp15) tmp18 = libdevice.log1p(tmp17) tmp19 = tl.where(tmp16, tmp15, tmp18) tmp20 = tmp14 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp25 = libdevice.tanh(tmp24) tmp26 = tmp23 * tmp25 tmp27 = tmp22 + tmp26 tmp29 = tmp28 > tmp2 tmp30 = tl_math.exp(tmp28) tmp31 = libdevice.log1p(tmp30) tmp32 = tl.where(tmp29, tmp28, tmp31) tmp33 = tmp27 * tmp32 tmp35 = tmp33 + tmp34 tmp36 = libdevice.tanh(tmp35) tmp38 = libdevice.tanh(tmp37) tmp39 = tmp36 * tmp38 tmp40 = tmp35 + tmp39 tmp42 = tmp41 > tmp2 tmp43 = tl_math.exp(tmp41) tmp44 = libdevice.log1p(tmp43) tmp45 = tl.where(tmp42, tmp41, tmp44) tmp46 = tmp40 * tmp45 tmp48 = tmp46 + tmp47 tmp49 = tl.sigmoid(tmp48) tl.store(in_out_ptr0 + x3, tmp49, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_12, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_softplus_tanh_0[grid(256)](buf1, primals_2, primals_1, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_12 return (buf1, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, buf1) class Bitparm(nn.Module): """ save params """ def __init__(self, channel, final=False): super(Bitparm, self).__init__() self.final = final self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel). view(1, -1, 1, 1), 0, 0.01)) self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel). view(1, -1, 1, 1), 0, 0.01)) if not final: self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel ).view(1, -1, 1, 1), 0, 0.01)) else: self.a = None def forward(self, x): if self.final: return torch.sigmoid(x * F.softplus(self.h) + self.b) else: x = x * F.softplus(self.h) + self.b return x + torch.tanh(x) * torch.tanh(self.a) class BitEstimatorNew(nn.Module): """ Estimate bit """ def __init__(self, channel): super(BitEstimatorNew, self).__init__() self.f1 = Bitparm(channel) self.f2 = Bitparm(channel) self.f3 = Bitparm(channel) self.f4 = Bitparm(channel, True) def forward(self, input_0): primals_1 = self.f1.h primals_3 = self.f1.b primals_4 = self.f1.a primals_5 = self.f2.h primals_6 = self.f2.b primals_7 = self.f2.a primals_8 = self.f3.h primals_9 = self.f3.b primals_10 = self.f3.a primals_11 = self.f4.h primals_12 = self.f4.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
Geunwoo-Jeon/iclr_17_compression
BitEstimator
false
13,725
[ "MIT" ]
56
a28746b1f1c518d91125d8f289d9511cde488c77
https://github.com/Geunwoo-Jeon/iclr_17_compression/tree/a28746b1f1c518d91125d8f289d9511cde488c77
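A minimal smoke test for the BitEstimator record, assuming a CUDA device; the single fused kernel should match chaining the four eager Bitparm modules, which share their parameters with the compiled wrapper (the kernel's 20.0 cutoff is F.softplus's default threshold):
import torch
be = BitEstimatorNew(channel=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = be(x)
ref = be.f4(be.f3(be.f2(be.f1(x))))   # eager path through the same Bitparm modules
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-6)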
BCEWithLogitsLossWeighted
import torch import torch.nn as nn class WeightedLoss(nn.Module): def __init__(self): super(WeightedLoss, self).__init__() self.weighted = False def generate_weight_mask(self, mask, to_ignore=None): """ Generates a weight mask where pixel weights are inversely proportional to how many pixels are in the class @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table. @param to_ignore: a list of classes (integers) to ignore when creating mask @return: a torch.FloatTensor that is same shape as mask. """ N = mask.shape[0] if self.weighted: weight_mask = torch.zeros_like(mask).float() for i in range(N): unique_object_labels = torch.unique(mask[i]) for obj in unique_object_labels: if to_ignore is not None and obj in to_ignore: continue num_pixels = torch.sum(mask[i] == obj, dtype=torch.float) weight_mask[i, mask[i] == obj] = 1 / num_pixels else: weight_mask = torch.ones_like(mask) if to_ignore is not None: for obj in to_ignore: weight_mask[mask == obj] = 0 return weight_mask class BCEWithLogitsLossWeighted(WeightedLoss): """ Compute weighted BCE loss with logits """ def __init__(self, weighted=False): super(BCEWithLogitsLossWeighted, self).__init__() self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none') self.weighted = weighted def forward(self, x, target): """ Compute masked cosine similarity loss @param x: a [N x H x W] torch.FloatTensor of foreground logits @param target: a [N x H x W] torch.FloatTensor of values in [0, 1] """ temp = self.BCEWithLogitsLoss(x, target) weight_mask = self.generate_weight_mask(target) loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tl.broadcast_to(tmp1, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = tmp15 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0[ grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class WeightedLoss(nn.Module): def __init__(self): super(WeightedLoss, self).__init__() self.weighted = False def generate_weight_mask(self, mask, to_ignore=None): """ Generates a weight mask where pixel weights are inversely proportional to how many pixels are in the class @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table. @param to_ignore: a list of classes (integers) to ignore when creating mask @return: a torch.FloatTensor that is same shape as mask. """ N = mask.shape[0] if self.weighted: weight_mask = torch.zeros_like(mask).float() for i in range(N): unique_object_labels = torch.unique(mask[i]) for obj in unique_object_labels: if to_ignore is not None and obj in to_ignore: continue num_pixels = torch.sum(mask[i] == obj, dtype=torch.float) weight_mask[i, mask[i] == obj] = 1 / num_pixels else: weight_mask = torch.ones_like(mask) if to_ignore is not None: for obj in to_ignore: weight_mask[mask == obj] = 0 return weight_mask class BCEWithLogitsLossWeightedNew(WeightedLoss): """ Compute weighted BCE loss with logits """ def __init__(self, weighted=False): super(BCEWithLogitsLossWeightedNew, self).__init__() self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none') self.weighted = weighted def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Guangyun-Xu/uois
BCEWithLogitsLossWeighted
false
13,726
[ "MIT" ]
106
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
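A minimal smoke test for the BCEWithLogitsLossWeighted record, assuming a CUDA device. Reading the kernel's index mapping, in_ptr0 (bound to input_0) is consumed as the target and in_ptr1 as the logits, i.e. the compiled wrapper appears to take its arguments in the reverse order of the eager forward(x, target); with weighted=False the weight mask is all ones, so the result is the plain mean:
import torch
import torch.nn.functional as F
crit = BCEWithLogitsLossWeightedNew().cuda()
logits = torch.randn(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
out = crit(target, logits)                       # target first for the compiled wrapper
ref = F.binary_cross_entropy_with_logits(logits, target)   # unweighted mean
torch.testing.assert_close(out, ref)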
TransformerEncoderLayer
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(torch, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearReLU(nn.Module): def __init__(self, d_model, d_hidden): super().__init__() self.feedforward = Feedforward(d_model, d_hidden, activation='relu') self.linear = Linear(d_hidden, d_model) def forward(self, x, padding=None): return self.linear(self.feedforward(x)) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF dot_products.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False): super().__init__() self.attention = Attention(d_key, dropout_ratio, causal=causal) self.wq = Linear(d_key, d_key, bias=False) self.wk = Linear(d_key, d_key, bias=False) self.wv = Linear(d_value, d_value, bias=False) self.n_heads = n_heads def forward(self, query, key, value, padding=None): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return torch.cat([self.attention(q, k, v, padding=padding) for q, k, v in zip(query, key, value)], -1) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class ResidualBlock(nn.Module): def __init__(self, layer, d_model, dropout_ratio): super().__init__() self.layer = layer self.dropout = nn.Dropout(dropout_ratio) self.layernorm = LayerNorm(d_model) def forward(self, *x, padding=None): return self.layernorm(x[0] + self.dropout(self.layer(*x, padding= padding))) class TransformerEncoderLayer(nn.Module): def __init__(self, dimension, n_heads, hidden, dropout): super().__init__() self.selfattn = ResidualBlock(MultiHead(dimension, dimension, n_heads, dropout), dimension, dropout) self.feedforward = ResidualBlock(LinearReLU(dimension, hidden), dimension, dropout) def forward(self, x, padding=None): return self.feedforward(self.selfattn(x, x, x, padding=padding)) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dimension': 4, 'n_heads': 4, 'hidden': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tmp0 * tmp5 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tmp11 = tmp6 / tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 
1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12 del buf12 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16 del buf16 triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf14 buf20 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0) del buf6 buf21 = buf20 del buf20 buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0) del buf18 triton_poi_fused_add_mean_std_3[grid(16)](buf21, primals_1, buf19, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_5, primals_1, buf19, buf22, buf21, primals_6, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_6 buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf24) buf25 = reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0) del buf24 buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf25, primals_8, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0) del buf26 triton_poi_fused_add_6[grid(64)](buf27, buf23, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_11, buf27, primals_12, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return (buf28, primals_1, primals_5, primals_11, buf5, buf9, buf13, buf17, buf19, reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf27, primals_9, buf29, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3 ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), 
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)) def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(torch, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearReLU(nn.Module): def __init__(self, d_model, d_hidden): super().__init__() self.feedforward = Feedforward(d_model, d_hidden, activation='relu') self.linear = Linear(d_hidden, d_model) def forward(self, x, padding=None): return self.linear(self.feedforward(x)) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF dot_products.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False): super().__init__() self.attention = Attention(d_key, dropout_ratio, causal=causal) self.wq = Linear(d_key, d_key, bias=False) self.wk = Linear(d_key, d_key, bias=False) self.wv = Linear(d_value, d_value, bias=False) self.n_heads = n_heads def forward(self, query, key, value, padding=None): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return torch.cat([self.attention(q, k, v, padding=padding) for q, k, v in zip(query, key, value)], -1) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class ResidualBlock(nn.Module): def __init__(self, layer, d_model, dropout_ratio): super().__init__() self.layer = layer self.dropout = nn.Dropout(dropout_ratio) self.layernorm = LayerNorm(d_model) def forward(self, *x, padding=None): return self.layernorm(x[0] + self.dropout(self.layer(*x, padding= padding))) class TransformerEncoderLayerNew(nn.Module): def __init__(self, dimension, n_heads, hidden, dropout): super().__init__() self.selfattn = ResidualBlock(MultiHead(dimension, dimension, n_heads, dropout), dimension, dropout) self.feedforward = ResidualBlock(LinearReLU(dimension, hidden), dimension, dropout) def forward(self, input_0): primals_2 = self.selfattn.layer.wq.weight primals_3 = self.selfattn.layer.wk.weight primals_4 = self.selfattn.layer.wv.weight primals_5 = self.selfattn.layernorm.gamma primals_6 = self.selfattn.layernorm.beta primals_7 = self.feedforward.layer.feedforward.linear.weight primals_8 = 
self.feedforward.layer.feedforward.linear.bias primals_9 = self.feedforward.layer.linear.weight primals_10 = self.feedforward.layer.linear.bias primals_11 = self.feedforward.layernorm.gamma primals_12 = self.feedforward.layernorm.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
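A quick sanity check for the LayerNorm defined above (a minimal sketch; the shape and tolerance are arbitrary, not from the source):

ln = LayerNorm(d_model=8)          # gamma starts at ones, beta at zeros
x = torch.randn(2, 5, 8)
y = ln(x)
# at init this is just (x - mean) / (std + eps), so each position comes out
# roughly zero-mean with unit (unbiased) std along the last dimension
print(y.mean(-1).abs().max(), y.std(-1).mean())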
FGDBTKD/decaNLP
TransformerEncoderLayer
false
13,727
[ "BSD-3-Clause" ]
2,361
ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
AveragePooling
import torch import torch.nn as nn class AveragePooling(nn.Module): def __init__(self): super(AveragePooling, self).__init__() """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, x, x_mask): """ x_output: num_items x input_size x 1 --> num_items x input_size """ x_now = x.clone() empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x_now) x_now.data.masked_fill_(empty_mask.data, 0) x_sum = torch.sum(x_now, 1) x_num = torch.sum(x_mask.eq(1).float(), 1).unsqueeze(1).expand_as(x_sum ) x_num = torch.clamp(x_num, min=1) return x_sum / x_num def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
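A small worked example of the masked mean above (a sketch; the shapes are illustrative, not from the source):

pool = AveragePooling()
x = torch.arange(12.0).view(2, 3, 2)        # 2 items, 3 subitems, input_size 2
x_mask = torch.tensor([[1.0, 1.0, 0.0],     # item 0: average subitems 0 and 1
                       [1.0, 0.0, 0.0]])    # item 1: only subitem 0 survives
out = pool(x, x_mask)                       # shape (2, 2)
# row 0 is (x[0, 0] + x[0, 1]) / 2; row 1 is x[1, 0] / 1 (divisor clamped to 1)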
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_masked_fill_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask) tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask) tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tl.where(tmp2, tmp1, tmp3) tmp6 = tmp5 == tmp1 tmp8 = tl.where(tmp6, tmp1, tmp7) tmp9 = tmp4 + tmp8 tmp11 = tmp10 == tmp1 tmp13 = tl.where(tmp11, tmp1, tmp12) tmp14 = tmp9 + tmp13 tmp16 = tmp15 == tmp1 tmp18 = tl.where(tmp16, tmp1, tmp17) tmp19 = tmp14 + tmp18 tmp20 = 1.0 tmp21 = tmp0 == tmp20 tmp22 = tmp21.to(tl.float32) tmp23 = tmp5 == tmp20 tmp24 = tmp23.to(tl.float32) tmp25 = tmp22 + tmp24 tmp26 = tmp10 == tmp20 tmp27 = tmp26.to(tl.float32) tmp28 = tmp25 + tmp27 tmp29 = tmp15 == tmp20 tmp30 = tmp29.to(tl.float32) tmp31 = tmp28 + tmp30 tmp32 = triton_helpers.maximum(tmp31, tmp20) tmp33 = tmp19 / tmp32 tl.store(out_ptr0 + x4, tmp33, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_masked_fill_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class AveragePoolingNew(nn.Module): def __init__(self): super(AveragePoolingNew, self).__init__() """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
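The fused AveragePoolingNew above should be numerically interchangeable with the eager module; a minimal parity check under that assumption (CUDA-only, since the generated kernel allocates CUDA buffers, and assuming both classes above are in scope):

x = torch.rand(4, 4, 4, 4, device='cuda')
x_mask = torch.rand(4, 4, 4, device='cuda')
ref = AveragePooling()(x, x_mask)
opt = AveragePoolingNew()(x, x_mask)
torch.testing.assert_close(ref, opt)   # both implement the same masked mean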
GingerNg/SDNet
AveragePooling
false
13,728
[ "MIT" ]
112
48ad8cc57c9a02aaad10e34d0c91a174ac68f056
https://github.com/GingerNg/SDNet/tree/48ad8cc57c9a02aaad10e34d0c91a174ac68f056
CELossWeighted
import torch import torch.nn as nn class WeightedLoss(nn.Module): def __init__(self): super(WeightedLoss, self).__init__() self.weighted = False def generate_weight_mask(self, mask, to_ignore=None): """ Generates a weight mask where pixel weights are inversely proportional to how many pixels are in the class @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table. @param to_ignore: a list of classes (integers) to ignore when creating mask @return: a torch.FloatTensor that is same shape as mask. """ N = mask.shape[0] if self.weighted: weight_mask = torch.zeros_like(mask).float() for i in range(N): unique_object_labels = torch.unique(mask[i]) for obj in unique_object_labels: if to_ignore is not None and obj in to_ignore: continue num_pixels = torch.sum(mask[i] == obj, dtype=torch.float) weight_mask[i, mask[i] == obj] = 1 / num_pixels else: weight_mask = torch.ones_like(mask) if to_ignore is not None: for obj in to_ignore: weight_mask[mask == obj] = 0 return weight_mask class CELossWeighted(WeightedLoss): """ Compute weighted CE loss with logits """ def __init__(self, weighted=False): super(CELossWeighted, self).__init__() self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none') self.weighted = weighted def forward(self, x, target): """ Compute weighted cross entropy @param x: a [N x C x H x W] torch.FloatTensor of values @param target: a [N x H x W] torch.LongTensor of values """ temp = self.CrossEntropyLoss(x, target) weight_mask = self.generate_weight_mask(target) loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
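A minimal usage sketch for the unweighted path above (shapes follow the docstring; hard integer labels are an assumption here, since the bundled get_inputs feeds soft float targets instead):

crit = CELossWeighted(weighted=False)
logits = torch.randn(2, 3, 4, 4)            # N x C x H x W
target = torch.randint(0, 3, (2, 4, 4))     # N x H x W class indices
loss = crit(logits, target)                 # scalar; with weighted=False and no
# to_ignore the weight mask is all ones, so this reduces to the mean CE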
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp1, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tmp5 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg0_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2[grid(1)](buf4 , buf1, 1, 256, num_warps=2, num_stages=1) del buf1 return buf4, class WeightedLoss(nn.Module): def __init__(self): super(WeightedLoss, self).__init__() self.weighted = False def generate_weight_mask(self, mask, to_ignore=None): """ Generates a weight mask where pixel weights are inversely proportional to how many pixels are in the class @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table. @param to_ignore: a list of classes (integers) to ignore when creating mask @return: a torch.FloatTensor that is same shape as mask. """ N = mask.shape[0] if self.weighted: weight_mask = torch.zeros_like(mask).float() for i in range(N): unique_object_labels = torch.unique(mask[i]) for obj in unique_object_labels: if to_ignore is not None and obj in to_ignore: continue num_pixels = torch.sum(mask[i] == obj, dtype=torch.float) weight_mask[i, mask[i] == obj] = 1 / num_pixels else: weight_mask = torch.ones_like(mask) if to_ignore is not None: for obj in to_ignore: weight_mask[mask == obj] = 0 return weight_mask class CELossWeightedNew(WeightedLoss): """ Compute weighted CE loss with logits """ def __init__(self, weighted=False): super(CELossWeightedNew, self).__init__() self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none') self.weighted = weighted def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Guangyun-Xu/uois
CELossWeighted
false
13,729
[ "MIT" ]
106
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
Conv2d_GN_ReLU
import torch import torch.nn as nn class Conv2d_GN_ReLU(nn.Module): """ Implements a module that performs conv2d + groupnorm + ReLU + Assumes kernel size is odd """ def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1 ): super(Conv2d_GN_ReLU, self).__init__() padding = 0 if ksize < 2 else ksize // 2 self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, stride=stride, padding=padding, bias=False) self.gn1 = nn.GroupNorm(num_groups, out_channels) self.relu1 = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) out = self.gn1(out) out = self.relu1(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'num_groups': 1}]
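A shape sketch for the block above (illustrative sizes; out_channels must be divisible by num_groups):

block = Conv2d_GN_ReLU(in_channels=4, out_channels=8, num_groups=2)  # ksize=3 -> padding=1
x = torch.rand(1, 4, 16, 16)
y = block(x)   # (1, 8, 16, 16): odd kernel with padding=ksize//2 keeps H and W at stride 1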
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = 0.0 tmp31 = tmp29 <= tmp30 tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask) tl.store(out_ptr3 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(4)]( buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 return buf5, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf1 , (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf6 class Conv2d_GN_ReLUNew(nn.Module): """ Implements a module that performs conv2d + groupnorm + ReLU + Assumes kernel size is odd """ def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1 ): super(Conv2d_GN_ReLUNew, 
self).__init__() padding = 0 if ksize < 2 else ksize // 2 self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, stride=stride, padding=padding, bias=False) self.gn1 = nn.GroupNorm(num_groups, out_channels) self.relu1 = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.gn1.weight primals_4 = self.gn1.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Guangyun-Xu/uois
Conv2d_GN_ReLU
false
13,730
[ "MIT" ]
106
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
CosAttention
import torch import torch.nn.functional as F import torch.nn as nn from torch.nn import Parameter class ConstAttention(nn.Module): def __init__(self, **kwargs): super(ConstAttention, self).__init__() def forward(self, neighbor_vecs, self_vecs): return 1 class GatAttention(ConstAttention): def __init__(self, num_heads, out_channels): super(GatAttention, self).__init__() self.num_heads = num_heads self.out_channels = out_channels self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.reset_parameters() def reset_parameters(self): pass def forward(self, neighbor_vecs, self_vecs): alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs * self.att_neighbor_weight).sum(dim=-1) alpha = F.leaky_relu(alpha, negative_slope=0.2) return alpha class CosAttention(GatAttention): def forward(self, neighbor_vecs, self_vecs): alpha = (neighbor_vecs * self.att_neighbor_weight * self_vecs * self.att_self_weight) alpha = alpha.sum(dim=-1) return alpha def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_heads': 4, 'out_channels': 4}]
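A usage sketch for the score above (note reset_parameters is a no-op, so the attention weights hold uninitialized memory until set; the explicit init below is an assumption, not from the source):

att = CosAttention(num_heads=4, out_channels=8)
nn.init.ones_(att.att_self_weight)
nn.init.ones_(att.att_neighbor_weight)
neighbor = torch.rand(32, 4, 8)       # (edges, heads, channels)
self_vec = torch.rand(32, 4, 8)
alpha = att(neighbor, self_vec)       # (32, 4): one unnormalized score per edge and head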
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp9 = tmp7 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp22 + tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](primals_2, primals_1, primals_3, primals_4, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf0, primals_1, primals_2, primals_3, primals_4 class ConstAttention(nn.Module): def __init__(self, **kwargs): super(ConstAttention, self).__init__() def forward(self, neighbor_vecs, self_vecs): return 1 class GatAttention(ConstAttention): def __init__(self, num_heads, out_channels): super(GatAttention, self).__init__() self.num_heads = num_heads self.out_channels = out_channels self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.reset_parameters() def reset_parameters(self): pass def forward(self, neighbor_vecs, self_vecs): alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs * 
self.att_neighbor_weight).sum(dim=-1) alpha = F.leaky_relu(alpha, negative_slope=0.2) return alpha class CosAttentionNew(GatAttention): def forward(self, input_0, input_1): primals_1 = self.att_self_weight primals_4 = self.att_neighbor_weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
GraphNAS/GraphNAS
CosAttention
false
13,731
[ "Apache-2.0" ]
94
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
Downsampler
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def bilinear_kernel(size, normalize=False): """ Make a 2D bilinear kernel suitable for upsampling/downsampling with normalize=False/True. The kernel is size x size square. Take size: kernel size (square) normalize: whether kernel sums to 1 (True) or not Give kernel: np.array with bilinear kernel coefficient """ factor = (size + 1) // 2 if size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:size, :size] kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) if normalize: kernel /= kernel.sum() return kernel class Interpolator(nn.Module): """ Interpolate by de/up/backward convolution with a bilinear kernel. Take channel_dim: the input channel dimension rate: upsampling rate, that is 4 -> 4x upsampling odd: the kernel parity, which is too much to explain here for now, but will be handled automagically in the future, promise. normalize: whether kernel sums to 1 """ def __init__(self, channel_dim, rate, odd=True, normalize=False): super().__init__() self.rate = rate ksize = rate * 2 if odd: ksize -= 1 kernel = torch.from_numpy(bilinear_kernel(ksize, normalize)) weight = torch.zeros(channel_dim, channel_dim, ksize, ksize) for k in range(channel_dim): weight[k, k] = kernel self.weight = nn.Parameter(weight, requires_grad=False) def forward(self, x): return F.conv_transpose2d(x, self.weight, stride=self.rate) class Downsampler(Interpolator): """ Downsample with a normalized bilinear kernel. """ def __init__(self, channel_dim, rate, odd=True): super().__init__(channel_dim, rate, odd, True) def forward(self, x): return F.conv2d(x, self.weight, stride=self.rate) def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'channel_dim': 4, 'rate': 4}]
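The output size follows the usual convolution arithmetic; a quick sketch mirroring the get_inputs shapes above:

down = Downsampler(channel_dim=4, rate=4)   # odd kernel: ksize = 2*4 - 1 = 7, stride 4
x = torch.rand(4, 4, 64, 64)
y = down(x)                                 # (4, 4, 15, 15): floor((64 - 7) / 4) + 1 = 15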
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 196 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 225 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 900 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 225 * y3), tmp0, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 7, 7), (196, 49, 7, 1)) assert_size_stride(arg1_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch .float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4096)](arg1_1, buf0, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 7, 7), (196, 1, 28, 4), torch.float32) triton_poi_fused_convolution_1[grid(16, 49)](arg0_1, buf1, 16, 49, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 buf2 = extern_kernels.convolution(buf0, buf1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 15, 15), (900, 1, 60, 4)) del buf0 del buf1 buf3 = empty_strided_cuda((4, 4, 15, 15), (900, 225, 15, 1), torch. float32) triton_poi_fused_convolution_2[grid(16, 225)](buf2, buf3, 16, 225, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf2 return buf3, def bilinear_kernel(size, normalize=False): """ Make a 2D bilinear kernel suitable for upsampling/downsampling with normalize=False/True. The kernel is size x size square. 
Take size: kernel size (square) normalize: whether kernel sums to 1 (True) or not Give kernel: np.array with bilinear kernel coefficient """ factor = (size + 1) // 2 if size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:size, :size] kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) if normalize: kernel /= kernel.sum() return kernel class Interpolator(nn.Module): """ Interpolate by de/up/backward convolution with a bilinear kernel. Take channel_dim: the input channel dimension rate: upsampling rate, that is 4 -> 4x upsampling odd: the kernel parity, which is too much to explain here for now, but will be handled automagically in the future, promise. normalize: whether kernel sums to 1 """ def __init__(self, channel_dim, rate, odd=True, normalize=False): super().__init__() self.rate = rate ksize = rate * 2 if odd: ksize -= 1 kernel = torch.from_numpy(bilinear_kernel(ksize, normalize)) weight = torch.zeros(channel_dim, channel_dim, ksize, ksize) for k in range(channel_dim): weight[k, k] = kernel self.weight = nn.Parameter(weight, requires_grad=False) def forward(self, x): return F.conv_transpose2d(x, self.weight, stride=self.rate) class DownsamplerNew(Interpolator): """ Downsample with a normalized bilinear kernel. """ def __init__(self, channel_dim, rate, odd=True): super().__init__(channel_dim, rate, odd, True) def forward(self, input_0): arg0_1 = self.weight arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
Global19/revolver
Downsampler
false
13,732
[ "BSD-2-Clause" ]
151
200082798d862516de6d9aa18e863a5968127a3f
https://github.com/Global19/revolver/tree/200082798d862516de6d9aa18e863a5968127a3f
GatAttention
import torch import torch.nn.functional as F import torch.nn as nn from torch.nn import Parameter class ConstAttention(nn.Module): def __init__(self, **kwargs): super(ConstAttention, self).__init__() def forward(self, neighbor_vecs, self_vecs): return 1 class GatAttention(ConstAttention): def __init__(self, num_heads, out_channels): super(GatAttention, self).__init__() self.num_heads = num_heads self.out_channels = out_channels self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.reset_parameters() def reset_parameters(self): pass def forward(self, neighbor_vecs, self_vecs): alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs * self.att_neighbor_weight).sum(dim=-1) alpha = F.leaky_relu(alpha, negative_slope=0.2) return alpha def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_heads': 4, 'out_channels': 4}]
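As with the other attention variants, a short sketch (the explicit init is an assumption, since reset_parameters is a no-op and the parameters are otherwise uninitialized):

att = GatAttention(num_heads=4, out_channels=8)
nn.init.xavier_uniform_(att.att_self_weight)
nn.init.xavier_uniform_(att.att_neighbor_weight)
neighbor = torch.rand(32, 4, 8)
self_vec = torch.rand(32, 4, 8)
alpha = att(neighbor, self_vec)   # (32, 4): leaky_relu(self score + neighbor score, slope 0.2)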
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_leaky_relu_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp31 = 0.0 tmp32 = tmp30 > tmp31 tmp33 = 0.2 tmp34 = tmp30 * tmp33 tmp35 = tl.where(tmp32, tmp30, tmp34) tl.store(out_ptr1 + x2, tmp32, xmask) tl.store(out_ptr2 + x2, tmp35, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_leaky_relu_mul_sum_0[grid(64)](primals_2, primals_1, primals_4, primals_3, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_3 return buf2, primals_2, primals_4, buf1 class ConstAttention(nn.Module): def __init__(self, **kwargs): super(ConstAttention, self).__init__() def forward(self, neighbor_vecs, self_vecs): return 1 class GatAttentionNew(ConstAttention): def __init__(self, num_heads, out_channels): super(GatAttentionNew, self).__init__() self.num_heads = num_heads self.out_channels = out_channels self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.att_neighbor_weight = 
Parameter(torch.Tensor(1, self.num_heads, self.out_channels)) self.reset_parameters() def reset_parameters(self): pass def forward(self, input_0, input_1): primals_1 = self.att_self_weight primals_3 = self.att_neighbor_weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
GraphNAS/GraphNAS
GatAttention
false
13,733
[ "Apache-2.0" ]
94
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
VAEEncoder
import torch import torch.nn as nn import torch.nn.functional as F class VAEEncoder(nn.Module): def __init__(self, z_size): super(VAEEncoder, self).__init__() self.conv1 = nn.Conv2d(3, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, z_size) self.fc_log_var = nn.Linear(2 * 2 * 256, z_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(-1, 2 * 2 * 256) mu = self.fc_mu(x) log_var = self.fc_log_var(x) std = torch.exp(0.5 * log_var) eps = torch.randn_like(std) z = mu + eps * std return z, mu, log_var def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'z_size': 4}]
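A shape walk-through of the encoder above (batch size is arbitrary; the comment on z restates the reparameterization done in forward):

enc = VAEEncoder(z_size=32)
x = torch.rand(8, 3, 64, 64)     # four stride-2, k=4 convs: 64 -> 31 -> 14 -> 6 -> 2
z, mu, log_var = enc(x)          # each (8, 32); the flattened feature is 2*2*256 = 1024
# z = mu + eps * exp(0.5 * log_var) with eps ~ N(0, I): the reparameterization trick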
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) @triton.jit def triton_poi_fused_add_exp_mul_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(96, 16)](primals_1, buf0, 96, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_5[grid(123008)](buf6, primals_2, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(50176)](buf8, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(18432)](buf10, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256)) buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. 
float32) buf18 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(1024, 4)]( buf11, primals_9, buf12, buf18, 1024, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del buf11 del primals_9 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf13) del primals_11 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf14) del primals_13 buf15 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf16 = buf15 del buf15 buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_exp_mul_9[grid(16)](buf13, buf16, buf14, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) return (buf17, buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), buf14, buf16, primals_12, primals_10, buf18) class VAEEncoderNew(nn.Module): def __init__(self, z_size): super(VAEEncoderNew, self).__init__() self.conv1 = nn.Conv2d(3, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, z_size) self.fc_log_var = nn.Linear(2 * 2 * 256, z_size) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc_mu.weight primals_11 = self.fc_mu.bias primals_12 = self.fc_log_var.weight primals_13 = self.fc_log_var.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1], output[2]
GSSJacky/neural-painters-pytorch
VAEEncoder
false
13,734
[ "MIT" ]
138
017b32f1eced4c36e6ae15b73b52b9682994d3e6
https://github.com/GSSJacky/neural-painters-pytorch/tree/017b32f1eced4c36e6ae15b73b52b9682994d3e6
Interpolator
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def bilinear_kernel(size, normalize=False): """ Make a 2D bilinear kernel suitable for upsampling/downsampling with normalize=False/True. The kernel is size x size square. Take size: kernel size (square) normalize: whether kernel sums to 1 (True) or not Give kernel: np.array with bilinear kernel coefficient """ factor = (size + 1) // 2 if size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:size, :size] kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) if normalize: kernel /= kernel.sum() return kernel class Interpolator(nn.Module): """ Interpolate by de/up/backward convolution with a bilinear kernel. Take channel_dim: the input channel dimension rate: upsampling rate, that is 4 -> 4x upsampling odd: the kernel parity, which is too much to explain here for now, but will be handled automagically in the future, promise. normalize: whether kernel sums to 1 """ def __init__(self, channel_dim, rate, odd=True, normalize=False): super().__init__() self.rate = rate ksize = rate * 2 if odd: ksize -= 1 kernel = torch.from_numpy(bilinear_kernel(ksize, normalize)) weight = torch.zeros(channel_dim, channel_dim, ksize, ksize) for k in range(channel_dim): weight[k, k] = kernel self.weight = nn.Parameter(weight, requires_grad=False) def forward(self, x): return F.conv_transpose2d(x, self.weight, stride=self.rate) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel_dim': 4, 'rate': 4}]
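For the transposed convolution the output size is (in - 1) * stride + ksize; a quick check against the get_inputs shapes above:

up = Interpolator(channel_dim=4, rate=4)   # odd kernel: ksize = 7, stride 4
x = torch.rand(4, 4, 4, 4)
y = up(x)                                  # (4, 4, 19, 19): (4 - 1) * 4 + 7 = 19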
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 196 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 361 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 1444 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 361 * y3), tmp0, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 7, 7), (196, 49, 7, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 7, 7), (196, 1, 28, 4), torch.float32) triton_poi_fused_convolution_1[grid(16, 49)](arg0_1, buf1, 16, 49, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 buf2 = extern_kernels.convolution(buf0, buf1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 19, 19), (1444, 1, 76, 4)) del buf0 del buf1 buf3 = empty_strided_cuda((4, 4, 19, 19), (1444, 361, 19, 1), torch .float32) triton_poi_fused_convolution_2[grid(16, 361)](buf2, buf3, 16, 361, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf2 return buf3, def bilinear_kernel(size, normalize=False): """ Make a 2D bilinear kernel suitable for upsampling/downsampling with normalize=False/True. The kernel is size x size square. 
Take size: kernel size (square) normalize: whether kernel sums to 1 (True) or not Give kernel: np.array with bilinear kernel coefficient """ factor = (size + 1) // 2 if size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:size, :size] kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) if normalize: kernel /= kernel.sum() return kernel class InterpolatorNew(nn.Module): """ Interpolate by de/up/backward convolution with a bilinear kernel. Take channel_dim: the input channel dimension rate: upsampling rate, that is 4 -> 4x upsampling odd: the kernel parity, which is too much to explain here for now, but will be handled automagically in the future, promise. normalize: whether kernel sums to 1 """ def __init__(self, channel_dim, rate, odd=True, normalize=False): super().__init__() self.rate = rate ksize = rate * 2 if odd: ksize -= 1 kernel = torch.from_numpy(bilinear_kernel(ksize, normalize)) weight = torch.zeros(channel_dim, channel_dim, ksize, ksize) for k in range(channel_dim): weight[k, k] = kernel self.weight = nn.Parameter(weight, requires_grad=False) def forward(self, input_0): arg0_1 = self.weight arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
Global19/revolver
Interpolator
false
13,735
[ "BSD-2-Clause" ]
151
200082798d862516de6d9aa18e863a5968127a3f
https://github.com/Global19/revolver/tree/200082798d862516de6d9aa18e863a5968127a3f
TransformerEncoderLayer
import math import torch import torch.nn.functional as F import torch.nn as nn def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class TransformerEncoderLayer(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.dropout = nn.Dropout(p=dropout) def forward(self, tensor, mask): tensor = tensor + self.dropout(self.attention(tensor, mask=mask)) tensor = _normalize(tensor, self.norm1) tensor = tensor + self.dropout(self.ffn(tensor)) tensor = _normalize(tensor, self.norm2) tensor *= mask.unsqueeze(-1).float() return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
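A minimal forward pass for the layer above (a sketch; the all-ones mask means no padding, while zeros in the mask are both excluded from attention and zeroed in the final output):

layer = TransformerEncoderLayer(n_heads=4, embedding_size=4, ffn_size=4)
x = torch.rand(2, 5, 4)          # (batch, seq_len, embedding_size)
mask = torch.ones(2, 5)          # 1 = real token, 0 = padding
out = layer(x, mask)             # (2, 5, 4)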
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = -1.0000000200408773e+20
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp8 = tl.where(tmp6, tmp2, tmp7)
    tmp9 = tmp8 * tmp4
    tmp10 = triton_helpers.maximum(tmp5, tmp9)
    tmp13 = tl.where(tmp11, tmp2, tmp12)
    tmp14 = tmp13 * tmp4
    tmp15 = triton_helpers.maximum(tmp10, tmp14)
    tmp18 = tl.where(tmp16, tmp2, tmp17)
    tmp19 = tmp18 * tmp4
    tmp20 = triton_helpers.maximum(tmp15, tmp19)
    tmp21 = tmp5 - tmp20
    tmp22 = tmp21 * tmp4
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp9 - tmp20
    tmp25 = tmp24 * tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp23 + tmp26
    tmp28 = tmp14 - tmp20
    tmp29 = tmp28 * tmp4
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp27 + tmp30
    tmp32 = tmp19 - tmp20
    tmp33 = tmp32 * tmp4
    tmp34 = tl_math.exp(tmp33)
    tmp35 = tmp31 + tmp34
    tl.store(out_ptr0 + x2, tmp20, xmask)
    tl.store(out_ptr1 + x2, tmp35, xmask)


@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    x4 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_out_ptr0 + x3, xmask)
    tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp2 = -1.0000000200408773e+20
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp8 = tmp7 * tmp4
    tmp9 = tl_math.exp(tmp8)
    tmp11 = tmp9 / tmp10
    tl.store(in_out_ptr0 + x3, tmp11, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
        in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
        in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4,), (1,))
    assert_size_stride(primals_18, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_4, buf1, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf2 = buf0
        del buf0
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_8, buf4, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_8
        buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf3
        triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_6, buf5, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0),
            reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
        buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool)
        triton_poi_fused_repeat_1[grid(64)](primals_2, buf7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0)
        del buf2
        buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32)
        triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8,
            buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf10 = buf6
        del buf6
        triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7,
            buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0)
        del buf9
        extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4,
            1, 0), 0), out=buf11)
        buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf8
        triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
        del buf11
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf13)
        del primals_10
        buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
        buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
        triton_poi_fused_native_layer_norm_5[grid(16)](primals_1, buf13,
            buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_6[grid(64)](primals_1, buf13,
            buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_12
        buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1,
            4), 0), out=buf17)
        buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
        del buf17
        buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18,
            primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_14
        buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
        buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
        del buf19
        triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_16
        buf21 = buf15
        del buf15
        buf22 = buf14
        del buf14
        triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_mul_10[grid(64)](buf20, buf21, buf22, primals_17,
            primals_18, primals_2, buf23, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf21
        del buf22
        del primals_18
    return (buf23, primals_1, primals_2, primals_11, primals_17, buf7,
        buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16,
        reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(
        buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13,
        primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0))


def _normalize(tensor, norm_layer):
    """Broadcast layer norm"""
    size = tensor.size()
    return norm_layer(tensor.view(-1, size[-1])).view(size)


class MultiHeadAttention(nn.Module):

    def __init__(self, n_heads, dim, dropout=0):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.dim = dim
        self.attn_dropout = nn.Dropout(p=dropout)
        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.q_lin.weight)
        nn.init.xavier_normal_(self.k_lin.weight)
        nn.init.xavier_normal_(self.v_lin.weight)
        self.out_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.out_lin.weight)

    def forward(self, query, key=None, value=None, mask=None):
        batch_size, query_len, dim = query.size()
        assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured'
        assert mask is not None, 'Mask is None, please specify a mask'
        n_heads = self.n_heads
        dim_per_head = dim // n_heads
        scale = math.sqrt(dim_per_head)

        def prepare_head(tensor):
            _bsz, seq_len, _ = tensor.size()
            tensor = tensor.view(batch_size, tensor.size(1), n_heads,
                dim_per_head)
            tensor = tensor.transpose(1, 2).contiguous().view(batch_size *
                n_heads, seq_len, dim_per_head)
            return tensor

        if key is None and value is None:
            key = value = query
        elif value is None:
            value = key
        _, key_len, dim = key.size()
        q = prepare_head(self.q_lin(query))
        k = prepare_head(self.k_lin(key))
        v = prepare_head(self.v_lin(value))
        dot_prod = q.bmm(k.transpose(1, 2))
        attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1,
            n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len
            ).view(batch_size * n_heads, query_len, key_len)
        assert attn_mask.shape == dot_prod.shape
        dot_prod.masked_fill_(attn_mask, -float(1e+20))
        attn_weights = F.softmax(dot_prod / scale, dim=-1)
        attn_weights = self.attn_dropout(attn_weights)
        attentioned = attn_weights.bmm(v)
        attentioned = attentioned.view(batch_size, n_heads, query_len,
            dim_per_head).transpose(1, 2).contiguous().view(batch_size,
            query_len, dim)
        out = self.out_lin(attentioned)
        return out


class TransformerFFN(nn.Module):

    def __init__(self, dim, dim_hidden, relu_dropout=0):
        super(TransformerFFN, self).__init__()
        self.relu_dropout = nn.Dropout(p=relu_dropout)
        self.lin1 = nn.Linear(dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, dim)
        nn.init.xavier_uniform_(self.lin1.weight)
        nn.init.xavier_uniform_(self.lin2.weight)

    def forward(self, x):
        x = F.relu(self.lin1(x))
        x = self.relu_dropout(x)
        x = self.lin2(x)
        return x


class TransformerEncoderLayerNew(nn.Module):

    def __init__(self, n_heads, embedding_size, ffn_size,
            attention_dropout=0.0, relu_dropout=0.0, dropout=0.0):
        super().__init__()
        self.dim = embedding_size
        self.ffn_dim = ffn_size
        self.attention = MultiHeadAttention(n_heads, embedding_size,
            dropout=attention_dropout)
        self.norm1 = nn.LayerNorm(embedding_size)
        self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=
            relu_dropout)
        self.norm2 = nn.LayerNorm(embedding_size)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_0, input_1):
        primals_2 = self.attention.q_lin.weight
        primals_4 = self.attention.q_lin.bias
        primals_3 = self.attention.k_lin.weight
        primals_6 = self.attention.k_lin.bias
        primals_5 = self.attention.v_lin.weight
        primals_8 = self.attention.v_lin.bias
        primals_7 = self.attention.out_lin.weight
        primals_10 = self.attention.out_lin.bias
        primals_11 = self.norm1.weight
        primals_12 = self.norm1.bias
        primals_9 = self.ffn.lin1.weight
        primals_14 = self.ffn.lin1.bias
        primals_13 = self.ffn.lin2.weight
        primals_16 = self.ffn.lin2.bias
        primals_17 = self.norm2.weight
        primals_18 = self.norm2.bias
        primals_1 = input_0
        primals_15 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18])
        return output[0]
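A record pair like this can in principle be validated by loading the same parameters into both modules and comparing outputs; the sketch below is a hypothetical harness (it assumes the generated wiring is faithful to the eager module, and it needs a GPU because call() allocates CUDA buffers):

# Hypothetical parity harness, assuming the definitions above are in scope.
if torch.cuda.is_available():
    torch.manual_seed(0)
    args, kwargs = get_init_inputs()
    ref = TransformerEncoderLayer(*args, **kwargs).cuda()
    new = TransformerEncoderLayerNew(*args, **kwargs).cuda()
    new.load_state_dict(ref.state_dict())  # identical parameters
    tensor, mask = [t.cuda() for t in get_inputs()]
    # dropout defaults to 0.0, so both paths are deterministic
    print(torch.allclose(ref(tensor, mask), new(tensor, mask), atol=1e-5))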
Guaguago/Persona-Dialogue-Generation
TransformerEncoderLayer
false
13736
[ "MIT" ]
258
0d4526ec8eddff62751a70666e14d72103906f44
https://github.com/Guaguago/Persona-Dialogue-Generation/tree/0d4526ec8eddff62751a70666e14d72103906f44
SpeakNet
import math
import torch
import torch.nn as nn


def xavier_init(module):
    """Xavier initializer for module parameters."""
    for parameter in module.parameters():
        if len(parameter.data.shape) == 1:
            parameter.data.fill_(0)
        else:
            fan_in = parameter.data.size(0)
            fan_out = parameter.data.size(1)
            parameter.data.normal_(0, math.sqrt(2 / (fan_in + fan_out)))


class SpeakNet(nn.Module):
    """Module for speaking a token based on current state.

    In ``forward``: Return a probability distribution of utterances of tokens.
    """

    def __init__(self, state_size, out_size):
        super().__init__()
        self.net = nn.Linear(state_size, out_size)
        self.softmax = nn.Softmax()
        xavier_init(self)

    def forward(self, state):
        out_distr = self.softmax(self.net(state))
        return out_distr


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'out_size': 4}]
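Note that nn.Softmax() is constructed without an explicit dim, so on the 4-D input from get_inputs PyTorch falls back to its legacy implicit-dimension rule (dim=1 for inputs that are not 0-, 1-, or 3-D, with a deprecation warning); that is also the axis the fused kernels below normalize over. A hypothetical check:

# Hypothetical check of the implicit softmax axis (not part of the record).
net = SpeakNet(state_size=4, out_size=4)
out = net(torch.rand([4, 4, 4, 4]))
# for 4-D inputs the legacy rule picks dim=1, so that axis sums to one
assert torch.allclose(out.sum(dim=1), torch.ones(4, 4, 4), atol=1e-5)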
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf1
    return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2


def xavier_init(module):
    """Xavier initializer for module parameters."""
    for parameter in module.parameters():
        if len(parameter.data.shape) == 1:
            parameter.data.fill_(0)
        else:
            fan_in = parameter.data.size(0)
            fan_out = parameter.data.size(1)
            parameter.data.normal_(0, math.sqrt(2 / (fan_in + fan_out)))


class SpeakNetNew(nn.Module):
    """Module for speaking a token based on current state.

    In ``forward``: Return a probability distribution of utterances of tokens.
    """

    def __init__(self, state_size, out_size):
        super().__init__()
        self.net = nn.Linear(state_size, out_size)
        self.softmax = nn.Softmax()
        xavier_init(self)

    def forward(self, input_0):
        primals_1 = self.net.weight
        primals_2 = self.net.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Guaguago/Persona-Dialogue-Generation
SpeakNet
false
13737
[ "MIT" ]
258
0d4526ec8eddff62751a70666e14d72103906f44
https://github.com/Guaguago/Persona-Dialogue-Generation/tree/0d4526ec8eddff62751a70666e14d72103906f44
VAEDecoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class VAEDecoder(nn.Module):

    def __init__(self, z_size):
        super(VAEDecoder, self).__init__()
        self.fc = nn.Linear(z_size, 4 * 256)
        self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)

    def forward(self, x):
        x = self.fc(x)
        x = x.view(-1, 4 * 256, 1, 1)
        x = F.relu(self.deconv1(x))
        x = F.relu(self.deconv2(x))
        x = F.relu(self.deconv3(x))
        x = torch.sigmoid(self.deconv4(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'z_size': 4}]
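The fc output is reshaped to a batch of 1x1 latent maps (the [4, 4, 4, 4] test input becomes 64 maps of 1024 channels), and the four deconvolutions then upsample 1x1 to 64x64. For ConvTranspose2d with padding 0, the output size is (in - 1) * stride + kernel; below is a hypothetical sanity check of the sizes the generated code asserts:

# Hypothetical size check (not part of the record):
# ConvTranspose2d with padding=0 gives out = (in - 1) * stride + kernel.
def deconv_out(size, kernel, stride=2):
    return (size - 1) * stride + kernel

s = 1                        # fc output viewed as (N, 1024, 1, 1)
for kernel in (5, 5, 6, 6):
    s = deconv_out(s, kernel)
print(s)                     # 1 -> 5 -> 13 -> 30 -> 64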
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 36
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 96
    xnumel = 36
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 108 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_ptr0, in_ptr1, out_ptr0,
        ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 192
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 3
    y1 = yindex // 3
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (1024, 4), (4, 1))
    assert_size_stride(primals_2, (1024,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1))
    assert_size_stride(primals_9, (32,), (1,))
    assert_size_stride(primals_10, (32, 3, 6, 6), (108, 36, 6, 1))
    assert_size_stride(primals_11, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(131072, 25)](primals_4, buf0, 131072, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_4
        buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
            torch.float32)
        triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_6
        buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32),
            torch.float32)
        triton_poi_fused_2[grid(2048, 36)](primals_8, buf2, 2048, 36,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_8
        buf3 = empty_strided_cuda((32, 3, 6, 6), (108, 1, 18, 3), torch.float32)
        triton_poi_fused_3[grid(96, 36)](primals_10, buf3, 96, 36,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_10
        buf4 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (1, 4),
            0), alpha=1, beta=1, out=buf4)
        del primals_1
        del primals_2
        buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (64,
            1024, 1, 1), (1024, 1, 1, 1), 0), buf0, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (64, 128, 5, 5), (3200, 1, 640, 128))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_4[grid(204800)](buf6, primals_5,
            204800, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (64, 64, 13, 13), (10816, 1, 832, 64))
        buf8 = buf7
        del buf7
        triton_poi_fused_convolution_relu_5[grid(692224)](buf8, primals_7,
            692224, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf9 = extern_kernels.convolution(buf8, buf2, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (64, 32, 30, 30), (28800, 1, 960, 32))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_6[grid(1843200)](buf10,
            primals_9, 1843200, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_9
        buf11 = extern_kernels.convolution(buf10, buf3, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (64, 3, 64, 64), (12288, 1, 192, 3))
        buf12 = empty_strided_cuda((64, 3, 64, 64), (12288, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_convolution_sigmoid_7[grid(192, 4096)](buf11,
            primals_11, buf12, 192, 4096, XBLOCK=1024, YBLOCK=1,
            num_warps=4, num_stages=1)
        del buf11
        del primals_11
    return buf12, buf0, buf1, buf2, buf3, reinterpret_tensor(primals_3, (
        64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 1024, 1, 1), (
        1024, 1, 1, 1), 0), buf6, buf8, buf10, buf12


class VAEDecoderNew(nn.Module):

    def __init__(self, z_size):
        super(VAEDecoderNew, self).__init__()
        self.fc = nn.Linear(z_size, 4 * 256)
        self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_4 = self.deconv1.weight
        primals_5 = self.deconv1.bias
        primals_6 = self.deconv2.weight
        primals_7 = self.deconv2.bias
        primals_8 = self.deconv3.weight
        primals_9 = self.deconv3.bias
        primals_10 = self.deconv4.weight
        primals_11 = self.deconv4.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
GSSJacky/neural-painters-pytorch
VAEDecoder
false
13738
[ "MIT" ]
138
017b32f1eced4c36e6ae15b73b52b9682994d3e6
https://github.com/GSSJacky/neural-painters-pytorch/tree/017b32f1eced4c36e6ae15b73b52b9682994d3e6
GatSymAttention
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter


class ConstAttention(nn.Module):

    def __init__(self, **kwargs):
        super(ConstAttention, self).__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1


class GatAttention(ConstAttention):

    def __init__(self, num_heads, out_channels):
        super(GatAttention, self).__init__()
        self.num_heads = num_heads
        self.out_channels = out_channels
        self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
            self.out_channels))
        self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
            self.out_channels))
        self.reset_parameters()

    def reset_parameters(self):
        pass

    def forward(self, neighbor_vecs, self_vecs):
        alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (
            neighbor_vecs * self.att_neighbor_weight).sum(dim=-1)
        alpha = F.leaky_relu(alpha, negative_slope=0.2)
        return alpha


class GatSymAttention(GatAttention):

    def forward(self, neighbor_vecs, self_vecs):
        alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (
            neighbor_vecs * self.att_neighbor_weight).sum(dim=-1)
        alpha = F.leaky_relu(alpha, negative_slope=0.2)
        alpha_2 = (neighbor_vecs * self.att_self_weight).sum(dim=-1) + (
            self_vecs * self.att_neighbor_weight).sum(dim=-1)
        alpha = alpha + alpha_2
        return alpha


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_heads': 4, 'out_channels': 4}]
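Note that reset_parameters() is a no-op and Parameter(torch.Tensor(...)) leaves the attention weights uninitialized, so any real use needs explicit initialization first. A hypothetical setup:

# Hypothetical usage (not part of the record); the weights must be
# initialized explicitly because reset_parameters() does nothing.
att = GatSymAttention(num_heads=4, out_channels=4)
nn.init.xavier_uniform_(att.att_self_weight)
nn.init.xavier_uniform_(att.att_neighbor_weight)
neighbor_vecs = torch.rand([4, 4, 4, 4])
self_vecs = torch.rand([4, 4, 4, 4])
alpha = att(neighbor_vecs, self_vecs)   # per-head scores, shape [4, 4, 4]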
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_leaky_relu_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tmp17 = tmp15 * tmp16
    tmp20 = tmp18 * tmp19
    tmp21 = tmp17 + tmp20
    tmp24 = tmp22 * tmp23
    tmp25 = tmp21 + tmp24
    tmp28 = tmp26 * tmp27
    tmp29 = tmp25 + tmp28
    tmp30 = tmp14 + tmp29
    tmp31 = tmp15 * tmp1
    tmp32 = tmp18 * tmp4
    tmp33 = tmp31 + tmp32
    tmp34 = tmp22 * tmp8
    tmp35 = tmp33 + tmp34
    tmp36 = tmp26 * tmp12
    tmp37 = tmp35 + tmp36
    tmp38 = tmp0 * tmp16
    tmp39 = tmp3 * tmp19
    tmp40 = tmp38 + tmp39
    tmp41 = tmp7 * tmp23
    tmp42 = tmp40 + tmp41
    tmp43 = tmp11 * tmp27
    tmp44 = tmp42 + tmp43
    tmp45 = tmp37 + tmp44
    tmp46 = 0.0
    tmp47 = tmp30 > tmp46
    tmp48 = 0.2
    tmp49 = tmp30 * tmp48
    tmp50 = tl.where(tmp47, tmp30, tmp49)
    tmp51 = tmp50 + tmp45
    tl.store(out_ptr1 + x2, tmp47, xmask)
    tl.store(in_out_ptr0 + x2, tmp51, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_poi_fused_add_leaky_relu_mul_sum_0[grid(64)](buf3,
            primals_2, primals_1, primals_4, primals_3, buf1, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        del primals_3
    return buf3, primals_2, primals_4, buf1


class ConstAttention(nn.Module):

    def __init__(self, **kwargs):
        super(ConstAttention, self).__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1


class GatAttention(ConstAttention):

    def __init__(self, num_heads, out_channels):
        super(GatAttention, self).__init__()
        self.num_heads = num_heads
        self.out_channels = out_channels
        self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
            self.out_channels))
        self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
            self.out_channels))
        self.reset_parameters()

    def reset_parameters(self):
        pass

    def forward(self, neighbor_vecs, self_vecs):
        alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (
            neighbor_vecs * self.att_neighbor_weight).sum(dim=-1)
        alpha = F.leaky_relu(alpha, negative_slope=0.2)
        return alpha


class GatSymAttentionNew(GatAttention):

    def forward(self, input_0, input_1):
        primals_1 = self.att_self_weight
        primals_3 = self.att_neighbor_weight
        primals_2 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
GraphNAS/GraphNAS
GatSymAttention
false
13,739
[ "Apache-2.0" ]
94
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
SVHNConvNet
import torch
from torch import nn
import torch.nn.functional as F


class SVHNConvNet(nn.Module):

    def __init__(self):
        super(SVHNConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5, 1, 2)
        self.conv2 = nn.Conv2d(32, 64, 5, 1, 2)
        self.conv3 = nn.Conv2d(64, 128, 5, 1, 2)
        self.conv4 = nn.Conv2d(128, 256, 5, 1, 2)
        self.fc1 = nn.Linear(3 * 3 * 256, 64)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, x):
        x_shape = x.shape
        assert len(x_shape) == 4, x.shape
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv4(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 3 * 3 * 256)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 48, 48])]


def get_init_inputs():
    return [[], {}]
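The fc1 width follows from the spatial arithmetic: every 5x5 conv uses padding 2 (shape-preserving), and every 2x2 max-pool halves the spatial size, so 48 shrinks to 3 after four pools and the flattened feature is 3 * 3 * 256 = 2304. A hypothetical check:

# Hypothetical size check (not part of the record): padded 5x5 convs keep
# the spatial size, and each 2x2/stride-2 pool halves it.
s = 48
for _ in range(4):
    s //= 2
print(s, 3 * 3 * 256)        # 3 2304 -> matches fc1's in_features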
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    xnumel = 2304
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 2304 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 6912 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 96
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32 % 24
    x2 = xindex // 768
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 3072 * x2), None)
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 3072 * x2), None)
    tmp3 = tl.load(in_ptr0 + (1536 + x0 + 64 * x1 + 3072 * x2), None)
    tmp5 = tl.load(in_ptr0 + (1568 + x0 + 64 * x1 + 3072 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 12
    x2 = xindex // 768
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3072 * x2), None)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3072 * x2), None)
    tmp3 = tl.load(in_ptr0 + (1536 + x0 + 128 * x1 + 3072 * x2), None)
    tmp5 = tl.load(in_ptr0 + (1600 + x0 + 128 * x1 + 3072 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = xindex // 128 % 6
    x2 = xindex // 768
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 3072 * x2), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 3072 * x2), None)
    tmp3 = tl.load(in_ptr0 + (1536 + x0 + 256 * x1 + 3072 * x2), None)
    tmp5 = tl.load(in_ptr0 + (1664 + x0 + 256 * x1 + 3072 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0,
        out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 36
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 3
    y1 = yindex // 3
    y5 = yindex
    y4 = yindex // 9
    y6 = yindex % 9
    tmp0 = tl.load(in_ptr0 + (x2 + 512 * y0 + 3072 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (256 + x2 + 512 * y0 + 3072 * y1), xmask &
        ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1536 + x2 + 512 * y0 + 3072 * y1), xmask &
        ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (1792 + x2 + 512 * y0 + 3072 * y1), xmask &
        ymask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1, 1], 1, tl.int8)
    tmp4 = tl.full([1, 1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1, 1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1, 1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + (x2 + 256 * y5), tmp15, xmask & ymask)
    tl.store(out_ptr1 + (y6 + 9 * x2 + 2304 * y4), tmp16, xmask & ymask)


@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 48, 48), (6912, 2304, 48, 1))
    assert_size_stride(primals_2, (32, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_3, (32,), (1,))
    assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (256, 128, 5, 5), (3200, 25, 5, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (64, 2304), (2304, 1))
    assert_size_stride(primals_11, (64,), (1,))
    assert_size_stride(primals_12, (1, 64), (64, 1))
    assert_size_stride(primals_13, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 48, 48), (6912, 1, 144, 3),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(12, 2304)](primals_1, buf0, 12, 2304,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((32, 3, 5, 5), (75, 1, 15, 3), torch.float32)
        triton_poi_fused_1[grid(96, 25)](primals_2, buf1, 96, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32),
            torch.float32)
        triton_poi_fused_2[grid(2048, 25)](primals_4, buf2, 2048, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
            torch.float32)
        triton_poi_fused_3[grid(8192, 25)](primals_6, buf3, 8192, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128),
            torch.float32)
        triton_poi_fused_4[grid(32768, 25)](primals_8, buf4, 32768, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_8
        buf5 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 32, 48, 48), (73728, 1, 1536, 32))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_5[grid(294912)](buf6, primals_3,
            294912, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_3
        buf7 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32),
            torch.float32)
        buf8 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_6[grid(73728)](buf6, buf7,
            buf8, 73728, XBLOCK=512, num_warps=8, num_stages=1)
        buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 24, 24), (36864, 1, 1536, 64))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_7[grid(147456)](buf10, primals_5,
            147456, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf11 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64),
            torch.float32)
        buf12 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_8[grid(36864)](buf10,
            buf11, buf12, 36864, XBLOCK=512, num_warps=4, num_stages=1)
        buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 128, 12, 12), (18432, 1, 1536, 128))
        buf14 = buf13
        del buf13
        triton_poi_fused_convolution_relu_9[grid(73728)](buf14, primals_7,
            73728, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf15 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
            torch.float32)
        buf16 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_10[grid(18432)](buf14,
            buf15, buf16, 18432, XBLOCK=256, num_warps=4, num_stages=1)
        buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 256, 6, 6), (9216, 1, 1536, 256))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_11[grid(36864)](buf18, primals_9,
            36864, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_9
        buf19 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256),
            torch.int8)
        buf20 = empty_strided_cuda((4, 256, 3, 3), (2304, 9, 3, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_12[grid(36, 256)](buf18,
            buf19, buf20, 36, 256, XBLOCK=4, YBLOCK=64, num_warps=4,
            num_stages=1)
        buf21 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf20, (4, 2304), (2304, 1),
            0), reinterpret_tensor(primals_10, (2304, 64), (1, 2304), 0),
            out=buf21)
        buf22 = buf21
        del buf21
        triton_poi_fused_relu_13[grid(256)](buf22, primals_11, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
        buf24 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(
            primals_12, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf24)
        del primals_13
    return (buf24, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
        buf11, buf12, buf14, buf15, buf16, buf18, buf19,
        reinterpret_tensor(buf20, (4, 2304), (2304, 1), 0), buf22,
        primals_12, primals_10)


class SVHNConvNetNew(nn.Module):

    def __init__(self):
        super(SVHNConvNetNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5, 1, 2)
        self.conv2 = nn.Conv2d(32, 64, 5, 1, 2)
        self.conv3 = nn.Conv2d(64, 128, 5, 1, 2)
        self.conv4 = nn.Conv2d(128, 256, 5, 1, 2)
        self.fc1 = nn.Linear(3 * 3 * 256, 64)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_12 = self.fc2.weight
        primals_13 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
Felix-Petersen/algovision
SVHNConvNet
false
13740
[ "MIT" ]
52
b1b9596028af62de1c1d2c4e74cbd6168fc3ae3c
https://github.com/Felix-Petersen/algovision/tree/b1b9596028af62de1c1d2c4e74cbd6168fc3ae3c
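The SVHNConvNetNew module above wraps the inductor-generated call(), whose size asserts hard-code CUDA buffers and the tracing shape (a batch of 4 RGB 48x48 images). A minimal smoke test, assuming the class defined above is in scope and a CUDA device is available:

import torch

model = SVHNConvNetNew().cuda()
x = torch.rand(4, 3, 48, 48, device='cuda')   # must match the shape asserted in call()
out = model(x)                                # weights and input are routed through call()
print(out.shape)                              # expected: torch.Size([4, 1])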
CELossWeightedMasked
import torch import torch.nn as nn class WeightedLoss(nn.Module): def __init__(self): super(WeightedLoss, self).__init__() self.weighted = False def generate_weight_mask(self, mask, to_ignore=None): """ Generates a weight mask where pixel weights are inversely proportional to how many pixels are in the class @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table. @param to_ignore: a list of classes (integers) to ignore when creating mask @return: a torch.FloatTensor that is same shape as mask. """ N = mask.shape[0] if self.weighted: weight_mask = torch.zeros_like(mask).float() for i in range(N): unique_object_labels = torch.unique(mask[i]) for obj in unique_object_labels: if to_ignore is not None and obj in to_ignore: continue num_pixels = torch.sum(mask[i] == obj, dtype=torch.float) weight_mask[i, mask[i] == obj] = 1 / num_pixels else: weight_mask = torch.ones_like(mask) if to_ignore is not None: for obj in to_ignore: weight_mask[mask == obj] = 0 return weight_mask class CELossWeightedMasked(WeightedLoss): """ Compute weighted CE loss with logits """ def __init__(self, weighted=False): super(CELossWeightedMasked, self).__init__() self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none') self.weighted = weighted def forward(self, x, target, fg_mask): """ Compute weighted cross entropy @param x: a [N x C x H x W] torch.FloatTensor of values @param target: a [N x H x W] torch.LongTensor of values @param fg_mask: a [N x H x W] torch.LongTensor of values in {0, 1, 2, ...} """ temp = self.CrossEntropyLoss(x, target) weight_mask = self.generate_weight_mask(fg_mask, to_ignore=[0, 1]) loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) @triton.jit def triton_per_fused__log_softmax_div_index_put_lift_fresh_mul_neg_ones_like_sum_2( in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 64 tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = tl.where(tmp2, tmp1, tmp3) tmp5 = tmp0 == tmp3 tmp6 = tl.where(tmp5, tmp1, tmp4) tmp8 = tmp7 * tmp6 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = tl.broadcast_to(tmp6, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = tmp11 / tmp14 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg0_1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf0 buf4 = empty_strided_cuda((), (), torch.float32) buf6 = buf4 del buf4 triton_per_fused__log_softmax_div_index_put_lift_fresh_mul_neg_ones_like_sum_2[ grid(1)](buf6, arg2_1, buf3, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf3 return buf6, class WeightedLoss(nn.Module): def __init__(self): super(WeightedLoss, self).__init__() self.weighted = False def generate_weight_mask(self, mask, to_ignore=None): """ Generates a weight mask where pixel weights are inversely proportional to how many pixels are in the class @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table. @param to_ignore: a list of classes (integers) to ignore when creating mask @return: a torch.FloatTensor that is same shape as mask. """ N = mask.shape[0] if self.weighted: weight_mask = torch.zeros_like(mask).float() for i in range(N): unique_object_labels = torch.unique(mask[i]) for obj in unique_object_labels: if to_ignore is not None and obj in to_ignore: continue num_pixels = torch.sum(mask[i] == obj, dtype=torch.float) weight_mask[i, mask[i] == obj] = 1 / num_pixels else: weight_mask = torch.ones_like(mask) if to_ignore is not None: for obj in to_ignore: weight_mask[mask == obj] = 0 return weight_mask class CELossWeightedMaskedNew(WeightedLoss): """ Compute weighted CE loss with logits """ def __init__(self, weighted=False): super(CELossWeightedMaskedNew, self).__init__() self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none') self.weighted = weighted def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Guangyun-Xu/uois
CELossWeightedMasked
false
13741
[ "MIT" ]
106
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
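A minimal CPU check of the reference CELossWeightedMasked above, with weighted=True to exercise the per-object reweighting path. Shapes and label ranges are illustrative; note that fg_mask needs at least one label outside the ignored {0, 1} set, or the weight-mask denominator is zero:

import torch

loss_fn = CELossWeightedMasked(weighted=True)
logits = torch.randn(2, 3, 8, 8)                  # [N x C x H x W] class scores
target = torch.randint(0, 3, (2, 8, 8))           # [N x H x W] class indices
fg_mask = torch.randint(0, 4, (2, 8, 8)).float()  # {0, 1} are ignored background/table
print(loss_fn(logits, target, fg_mask).item())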
DistanceWiseRKD
import torch from torch import nn import torch.nn.functional as F def euclidean_distance(pred, squared=False, eps=1e-12): """Calculate the Euclidean distance between the two examples in the output representation space. Args: pred (torch.Tensor): The prediction of the teacher or student with shape (N, C). squared (bool): Whether to calculate the squared Euclidean distance. Defaults to False. eps (float): The minimum Euclidean distance between the two examples. Defaults to 1e-12. """ pred_square = pred.pow(2).sum(dim=-1) prod = torch.mm(pred, pred.t()) distance = (pred_square.unsqueeze(1) + pred_square.unsqueeze(0) - 2 * prod ).clamp(min=eps) if not squared: distance = distance.sqrt() distance = distance.clone() distance[range(len(prod)), range(len(prod))] = 0 return distance class DistanceWiseRKD(nn.Module): """PyTorch version of distance-wise loss of `Relational Knowledge Distillation. <https://arxiv.org/abs/1904.05068>`_. Args: loss_weight (float): Weight of distance-wise distillation loss. Defaults to 25.0. with_l2_norm (bool): Whether to normalize the model predictions before calculating the loss. Defaults to True. """ def __init__(self, loss_weight=25.0, with_l2_norm=True): super(DistanceWiseRKD, self).__init__() self.loss_weight = loss_weight self.with_l2_norm = with_l2_norm def distance_loss(self, preds_S, preds_T): """Calculate distance-wise distillation loss.""" d_T = euclidean_distance(preds_T, squared=False) mean_d_T = d_T[d_T > 0].mean() d_T = d_T / mean_d_T d_S = euclidean_distance(preds_S, squared=False) mean_d_S = d_S[d_S > 0].mean() d_S = d_S / mean_d_S return F.smooth_l1_loss(d_S, d_T) def forward(self, preds_S, preds_T): """Forward computation. Args: preds_S (torch.Tensor): The student model prediction with shape (N, C, H, W) or shape (N, C). preds_T (torch.Tensor): The teacher model prediction with shape (N, C, H, W) or shape (N, C). Return: torch.Tensor: The calculated loss value. """ preds_S = preds_S.view(preds_S.shape[0], -1) preds_T = preds_T.view(preds_T.shape[0], -1) if self.with_l2_norm: preds_S = F.normalize(preds_S, p=2, dim=1) preds_T = F.normalize(preds_T, p=2, dim=1) loss = self.distance_loss(preds_S, preds_T) * self.loss_weight return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = 1e-12 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp0 / tmp8 tl.store(out_ptr1 + (r1 + 64 * x0), tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_0[grid(4)](arg0_1, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32) triton_per_fused_div_linalg_vector_norm_0[grid(4)](arg1_1, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 return buf1, buf3 def euclidean_distance(pred, squared=False, eps=1e-12): """Calculate the Euclidean distance between the two examples in the output representation space. Args: pred (torch.Tensor): The prediction of the teacher or student with shape (N, C). squared (bool): Whether to calculate the squared Euclidean distance. Defaults to False. eps (float): The minimum Euclidean distance between the two examples. Defaults to 1e-12. """ pred_square = pred.pow(2).sum(dim=-1) prod = torch.mm(pred, pred.t()) distance = (pred_square.unsqueeze(1) + pred_square.unsqueeze(0) - 2 * prod ).clamp(min=eps) if not squared: distance = distance.sqrt() distance = distance.clone() distance[range(len(prod)), range(len(prod))] = 0 return distance class DistanceWiseRKDNew(nn.Module): """PyTorch version of distance-wise loss of `Relational Knowledge Distillation. <https://arxiv.org/abs/1904.05068>`_. Args: loss_weight (float): Weight of distance-wise distillation loss. Defaults to 25.0. with_l2_norm (bool): Whether to normalize the model predictions before calculating the loss. Defaults to True. """ def __init__(self, loss_weight=25.0, with_l2_norm=True): super(DistanceWiseRKDNew, self).__init__() self.loss_weight = loss_weight self.with_l2_norm = with_l2_norm def distance_loss(self, preds_S, preds_T): """Calculate distance-wise distillation loss.""" d_T = euclidean_distance(preds_T, squared=False) mean_d_T = d_T[d_T > 0].mean() d_T = d_T / mean_d_T d_S = euclidean_distance(preds_S, squared=False) mean_d_S = d_S[d_S > 0].mean() d_S = d_S / mean_d_S return F.smooth_l1_loss(d_S, d_T) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
HIT-cwh/mmrazor
DistanceWiseRKD
false
13,742
[ "Apache-2.0" ]
553
2dad24044d7f1dad88f20221f8fc071dd40fdd4f
https://github.com/HIT-cwh/mmrazor/tree/2dad24044d7f1dad88f20221f8fc071dd40fdd4f
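A quick CPU run of the reference DistanceWiseRKD above; the (N, C) shapes are illustrative, and (N, C, H, W) inputs are flattened to (N, -1) before normalization. Note that the compiled call() above returns only the fused L2-normalized tensors, since the boolean indexing in distance_loss is data-dependent:

import torch

rkd = DistanceWiseRKD(loss_weight=25.0, with_l2_norm=True)
preds_S = torch.randn(8, 16)  # student embeddings
preds_T = torch.randn(8, 16)  # teacher embeddings
print(rkd(preds_S, preds_T).item())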
KLDivergence
import torch from torch import nn import torch.nn.functional as F class KLDivergence(nn.Module): """A measure of how one probability distribution Q is different from a second, reference probability distribution P. Args: tau (float): Temperature coefficient. Defaults to 1.0. reduction (str): Specifies the reduction to apply to the loss: ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. ``'none'``: no reduction will be applied, ``'batchmean'``: the sum of the output will be divided by the batchsize, ``'sum'``: the output will be summed, ``'mean'``: the output will be divided by the number of elements in the output. Default: ``'batchmean'`` loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, tau=1.0, reduction='batchmean', loss_weight=1.0): super(KLDivergence, self).__init__() self.tau = tau self.loss_weight = loss_weight accept_reduction = {'none', 'batchmean', 'sum', 'mean'} assert reduction in accept_reduction, f'KLDivergence supports reduction {accept_reduction}, but gets {reduction}.' self.reduction = reduction def forward(self, preds_S, preds_T): """Forward computation. Args: preds_S (torch.Tensor): The student model prediction with shape (N, C, H, W) or shape (N, C). preds_T (torch.Tensor): The teacher model prediction with shape (N, C, H, W) or shape (N, C). Return: torch.Tensor: The calculated loss value. """ preds_T = preds_T.detach() softmax_pred_T = F.softmax(preds_T / self.tau, dim=1) logsoftmax_preds_S = F.log_softmax(preds_S / self.tau, dim=1) loss = self.tau ** 2 * F.kl_div(logsoftmax_preds_S, softmax_pred_T, reduction=self.reduction) return self.loss_weight * loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 
/ tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tmp40 = tmp39 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp40, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg1_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class KLDivergenceNew(nn.Module): """A measure of how one probability distribution Q is different from a second, reference probability distribution P. Args: tau (float): Temperature coefficient. Defaults to 1.0. reduction (str): Specifies the reduction to apply to the loss: ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. ``'none'``: no reduction will be applied, ``'batchmean'``: the sum of the output will be divided by the batchsize, ``'sum'``: the output will be summed, ``'mean'``: the output will be divided by the number of elements in the output. Default: ``'batchmean'`` loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, tau=1.0, reduction='batchmean', loss_weight=1.0): super(KLDivergenceNew, self).__init__() self.tau = tau self.loss_weight = loss_weight accept_reduction = {'none', 'batchmean', 'sum', 'mean'} assert reduction in accept_reduction, f'KLDivergence supports reduction {accept_reduction}, but gets {reduction}.' self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
HIT-cwh/mmrazor
KLDivergence
false
13,743
[ "Apache-2.0" ]
553
2dad24044d7f1dad88f20221f8fc071dd40fdd4f
https://github.com/HIT-cwh/mmrazor/tree/2dad24044d7f1dad88f20221f8fc071dd40fdd4f
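Typical distillation usage of the reference KLDivergence above; tau and shapes are illustrative. The tau ** 2 factor compensates for the 1/tau softening so gradient magnitudes stay comparable across temperatures:

import torch

kd = KLDivergence(tau=4.0, reduction='batchmean', loss_weight=1.0)
student_logits = torch.randn(8, 10)   # (N, C): softmax is taken over dim 1
teacher_logits = torch.randn(8, 10)
print(kd(student_logits, teacher_logits).item())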
Conv2d_GN_ReLUx2
import torch import torch.nn as nn class Conv2d_GN_ReLU(nn.Module): """ Implements a module that performs conv2d + groupnorm + ReLU + Assumes kernel size is odd """ def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1 ): super(Conv2d_GN_ReLU, self).__init__() padding = 0 if ksize < 2 else ksize // 2 self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, stride=stride, padding=padding, bias=False) self.gn1 = nn.GroupNorm(num_groups, out_channels) self.relu1 = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) out = self.gn1(out) out = self.relu1(out) return out class Conv2d_GN_ReLUx2(nn.Module): """ Implements a module that performs conv2d + groupnorm + ReLU + conv2d + groupnorm + ReLU (and a possible downsampling operation) Assumes kernel size is odd """ def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1 ): super(Conv2d_GN_ReLUx2, self).__init__() self.layer1 = Conv2d_GN_ReLU(in_channels, out_channels, num_groups, ksize=ksize, stride=stride) self.layer2 = Conv2d_GN_ReLU(out_channels, out_channels, num_groups, ksize=ksize, stride=stride) def forward(self, x): out = self.layer1(x) out = self.layer2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'num_groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = 0.0 tmp31 = tmp29 <= tmp30 tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask) tl.store(out_ptr3 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, 
tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3, primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_native_group_norm_relu_threshold_backward_1[grid(4)]( buf6, primals_6, primals_7, buf7, buf11, buf12, buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor( buf10, (4, 1), (1, 1), 0), buf12) class Conv2d_GN_ReLU(nn.Module): """ Implements a module that performs conv2d + groupnorm + ReLU + Assumes kernel size is odd """ def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1 ): super(Conv2d_GN_ReLU, self).__init__() padding = 0 if ksize < 2 else ksize // 2 self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, stride=stride, padding=padding, bias=False) self.gn1 = nn.GroupNorm(num_groups, out_channels) self.relu1 = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) out = self.gn1(out) out = self.relu1(out) return out class Conv2d_GN_ReLUx2New(nn.Module): """ Implements a module that performs conv2d + groupnorm + ReLU + conv2d + groupnorm + ReLU (and a possible downsampling operation) Assumes kernel size is odd """ def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1 ): super(Conv2d_GN_ReLUx2New, self).__init__() self.layer1 = Conv2d_GN_ReLU(in_channels, out_channels, num_groups, ksize=ksize, stride=stride) self.layer2 = Conv2d_GN_ReLU(out_channels, out_channels, num_groups, ksize=ksize, stride=stride) def forward(self, input_0): primals_1 = self.layer1.conv1.weight primals_3 = self.layer1.gn1.weight primals_4 = self.layer1.gn1.bias primals_5 = self.layer2.conv1.weight primals_6 = self.layer2.gn1.weight primals_7 = self.layer2.gn1.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return 
output[0]
Guangyun-Xu/uois
Conv2d_GN_ReLUx2
false
13744
[ "MIT" ]
106
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
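A shape check of the reference Conv2d_GN_ReLUx2 above on CPU. Channel counts are illustrative; num_groups must divide out_channels for nn.GroupNorm, and the ksize // 2 padding preserves spatial size for odd kernels at stride 1:

import torch

block = Conv2d_GN_ReLUx2(in_channels=3, out_channels=32, num_groups=8)
y = block(torch.randn(1, 3, 64, 64))
print(y.shape)   # torch.Size([1, 32, 64, 64]) with the default stride=1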
NullDiscriminator
import torch import torch.nn as nn import torch.utils.data class NullDiscriminator(nn.Module): def __init__(self): super(NullDiscriminator, self).__init__() def forward(self, inputs, y=None): d = inputs.sum(1, keepdim=True) return d def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class NullDiscriminatorNew(nn.Module): def __init__(self): super(NullDiscriminatorNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
HappyBelief/ContraD
NullDiscriminator
false
13,745
[ "MIT" ]
168
abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
https://github.com/HappyBelief/ContraD/tree/abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
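The reference NullDiscriminator above is just a channel sum, so its compiled kernel reduces dim 1 in a single pass. A one-line shape check with an illustrative input:

import torch

d = NullDiscriminator()(torch.rand(4, 4, 4, 4))
print(d.shape)   # torch.Size([4, 1, 4, 4]): dim 1 is reduced with keepdim=True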
BiDAFAttention
import torch import torch.nn as nn import torch.nn.functional as F def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class BiDAFAttention(nn.Module): """Bidirectional attention originally used by BiDAF. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super(BiDAFAttention, self).__init__() self.drop_prob = drop_prob self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, c, q, c_mask, q_mask): batch_size, c_len, _ = c.size() q_len = q.size(1) s = self.get_similarity_matrix(c, q) c_mask = c_mask.view(batch_size, c_len, 1) q_mask = q_mask.view(batch_size, 1, q_len) s1 = masked_softmax(s, q_mask, dim=2) s2 = masked_softmax(s, c_mask, dim=1) a = torch.bmm(s1, q) b = torch.bmm(torch.bmm(s1, s2.transpose(1, 2)), c) x = torch.cat([c, a, c * a, c * b], dim=2) return x def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr3 + x4, xmask) tmp6 = tl.load(in_ptr4 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp5 + tmp7 tmp9 = tmp0 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp0 tmp12 = -1e+30 tmp13 = tmp11 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 * tmp8 tmp17 = tmp10 - tmp15 tmp18 = tmp17 * tmp12 tmp19 = tmp16 + tmp18 tl.store(out_ptr0 + x4, tmp14, xmask) tl.store(out_ptr1 + x4, tmp19, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr2 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) 
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(primals_2, (4, 4, 4), ( 16, 1, 4), 0), out=buf3) buf4 = buf2 del buf2 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_1[grid(64)](primals_8, buf0, buf1, buf3, primals_6, primals_7, buf4, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_6 buf5 = buf3 del buf3 triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf5 del buf5 triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_5[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf8 del buf8 extern_kernels.bmm(buf6, primals_2, out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf11, primals_1, out=buf12) del buf11 buf13 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_6[grid(256)](primals_1, buf10, buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 del buf12 return buf13, primals_1, primals_2, primals_7, primals_8, buf6, buf9 def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class BiDAFAttentionNew(nn.Module): """Bidirectional attention originally used by BiDAF. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. 
The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super(BiDAFAttentionNew, self).__init__() self.drop_prob = drop_prob self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.c_weight primals_4 = self.q_weight primals_5 = self.cq_weight primals_6 = self.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
HakobJak/ml-mipt
BiDAFAttention
false
13,746
[ "MIT" ]
440
ab0cbd5d553e9da309bda54d35b4e93a8eb99696
https://github.com/HakobJak/ml-mipt/tree/ab0cbd5d553e9da309bda54d35b4e93a8eb99696
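A CPU sanity check of the reference BiDAFAttention above. Sizes are illustrative and drop_prob=0.0 keeps the run deterministic; masks follow masked_softmax's convention of 1 for real tokens and 0 for padding. The concatenation yields 4 * hidden_size for the hidden_size passed here (the docstring's 8 * hidden_size presumably counts the encoder's hidden size, with 2h-wide BiLSTM inputs):

import torch

B, c_len, q_len, H = 2, 10, 6, 8
att = BiDAFAttention(hidden_size=H, drop_prob=0.0)
c, q = torch.randn(B, c_len, H), torch.randn(B, q_len, H)
c_mask, q_mask = torch.ones(B, c_len), torch.ones(B, q_len)
x = att(c, q, c_mask, q_mask)
print(x.shape)   # torch.Size([2, 10, 32]) == (B, c_len, 4 * H)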
FusedLeakyReLU
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_leaky_relu_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = 1.4142135623730951 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp9, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_leaky_relu_mul_0[grid(256)](primals_2, primals_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale class FusedLeakyReLUNew(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
HappyBelief/ContraD
FusedLeakyReLU
false
13747
[ "MIT" ]
168
abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
https://github.com/HappyBelief/ContraD/tree/abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
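The reference FusedLeakyReLU above adds a per-channel bias, applies a 0.2-slope leaky ReLU, and rescales by sqrt(2) to roughly preserve activation variance, as in StyleGAN2-style generators. A small CPU check with illustrative sizes:

import torch

act = FusedLeakyReLU(channel=16)
y = act(torch.randn(2, 16, 8, 8))   # bias is broadcast as (1, 16, 1, 1)
print(y.shape)                      # torch.Size([2, 16, 8, 8])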
GluMlp
import torch import torch.nn as nn import torch.utils.collect_env class GluMlp(nn.Module): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features // 2, out_features) self.drop = nn.Dropout(drop) def init_weights(self): fc1_mid = self.fc1.bias.shape[0] // 2 nn.init.ones_(self.fc1.bias[fc1_mid:]) nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-06) def forward(self, x): x = self.fc1(x) x, gates = x.chunk(2, dim=-1) x = x * self.act(gates) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.collect_env assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp2 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + x2, tmp1, xmask) tl.store(out_ptr1 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(128)](buf0, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2), ( 2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 2), (64, 16, 4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), primals_4 class GluMlpNew(nn.Module): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features // 2, out_features) self.drop = nn.Dropout(drop) def init_weights(self): fc1_mid = self.fc1.bias.shape[0] // 2 nn.init.ones_(self.fc1.bias[fc1_mid:]) nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-06) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
HaotianUpenn/scatterbrain
GluMlp
false
13,748
[ "Apache-2.0" ]
49
c026128d7362ae627641d11d4e5627bc1f400eb1
https://github.com/HaotianUpenn/scatterbrain/tree/c026128d7362ae627641d11d4e5627bc1f400eb1
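# A minimal parity sketch for the record above, assuming the GluMlp/GluMlpNew
# definitions are in scope and CUDA device 0 is available (call() hard-codes
# that device):
import torch

torch.manual_seed(0)
ref = GluMlp(in_features=4).cuda()
opt = GluMlpNew(in_features=4).cuda()
opt.load_state_dict(ref.state_dict())  # identical fc1/fc2 parameters

x = torch.rand([4, 4, 4, 4], device='cuda')  # the record's own get_inputs()
with torch.no_grad():
    # The fused kernel should reproduce x.chunk(2, dim=-1) gating:
    # first half * sigmoid(second half).
    torch.testing.assert_close(opt(x), ref(x))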
AngleWiseRKD
import torch
from torch import nn
import torch.nn.functional as F


def angle(pred):
    """Calculate the angle-wise relational potential which measures the angle
    formed by the three examples in the output representation space.

    Args:
        pred (torch.Tensor): The prediction of the teacher or student with
            shape (N, C).
    """
    pred_vec = pred.unsqueeze(0) - pred.unsqueeze(1)
    norm_pred_vec = F.normalize(pred_vec, p=2, dim=2)
    angle = torch.bmm(norm_pred_vec, norm_pred_vec.transpose(1, 2)).view(-1)
    return angle


class AngleWiseRKD(nn.Module):
    """PyTorch version of angle-wise loss of `Relational Knowledge
    Distillation. <https://arxiv.org/abs/1904.05068>`_.

    Args:
        loss_weight (float): Weight of angle-wise distillation loss.
            Defaults to 50.0.
        with_l2_norm (bool): Whether to normalize the model predictions before
            calculating the loss. Defaults to True.
    """

    def __init__(self, loss_weight=50.0, with_l2_norm=True):
        super(AngleWiseRKD, self).__init__()
        self.loss_weight = loss_weight
        self.with_l2_norm = with_l2_norm

    def angle_loss(self, preds_S, preds_T):
        """Calculate the angle-wise distillation loss."""
        angle_T = angle(preds_T)
        angle_S = angle(preds_S)
        return F.smooth_l1_loss(angle_S, angle_T)

    def forward(self, preds_S, preds_T):
        """Forward computation.

        Args:
            preds_S (torch.Tensor): The student model prediction with
                shape (N, C, H, W) or shape (N, C).
            preds_T (torch.Tensor): The teacher model prediction with
                shape (N, C, H, W) or shape (N, C).
        Return:
            torch.Tensor: The calculated loss value.
        """
        preds_S = preds_S.view(preds_S.shape[0], -1)
        preds_T = preds_T.view(preds_T.shape[0], -1)
        if self.with_l2_norm:
            preds_S = F.normalize(preds_S, p=2, dim=-1)
            preds_T = F.normalize(preds_T, p=2, dim=-1)
        loss = self.angle_loss(preds_S, preds_T) * self.loss_weight
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_per_fused_div_linalg_vector_norm_sub_1(in_ptr0, in_ptr1,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 4
    x1 = xindex // 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = 1e-12
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 / tmp4
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = triton_helpers.maximum(tmp8, tmp3)
    tmp10 = tmp6 / tmp9
    tmp11 = tmp5 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = libdevice.sqrt(tmp16)
    tmp18 = triton_helpers.maximum(tmp17, tmp3)
    tmp19 = tmp11 / tmp18
    tl.store(out_ptr1 + (r2 + 64 * x3), tmp19, xmask)


@triton.jit
def triton_per_fused_mul_smooth_l1_loss_2(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = tmp3 * tmp3
    tmp7 = 0.5
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 * tmp4
    tmp10 = tmp3 - tmp7
    tmp11 = tl.where(tmp5, tmp9, tmp10)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.sum(tmp12, 1)[:, None]
    tmp15 = 64.0
    tmp16 = tmp14 / tmp15
    tmp17 = 50.0
    tmp18 = tmp16 * tmp17
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_0[grid(4)](arg0_1, buf0, 4, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32)
        triton_per_fused_div_linalg_vector_norm_sub_1[grid(16)](arg0_1,
            buf0, buf2, 16, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf2, reinterpret_tensor(buf2, (4, 64, 4), (256,
            1, 64), 0), out=buf3)
        buf4 = buf0
        del buf0
        triton_per_fused_linalg_vector_norm_0[grid(4)](arg1_1, buf4, 4, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        buf6 = buf2
        del buf2
        triton_per_fused_div_linalg_vector_norm_sub_1[grid(16)](arg1_1,
            buf4, buf6, 16, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf4
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf6, (4, 64, 4), (256,
            1, 64), 0), out=buf7)
        del buf6
        buf8 = empty_strided_cuda((), (), torch.float32)
        buf9 = buf8
        del buf8
        triton_per_fused_mul_smooth_l1_loss_2[grid(1)](buf9, buf3, buf7, 1,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf3
        del buf7
    return buf9,


def angle(pred):
    """Calculate the angle-wise relational potential which measures the angle
    formed by the three examples in the output representation space.

    Args:
        pred (torch.Tensor): The prediction of the teacher or student with
            shape (N, C).
    """
    pred_vec = pred.unsqueeze(0) - pred.unsqueeze(1)
    norm_pred_vec = F.normalize(pred_vec, p=2, dim=2)
    angle = torch.bmm(norm_pred_vec, norm_pred_vec.transpose(1, 2)).view(-1)
    return angle


class AngleWiseRKDNew(nn.Module):
    """PyTorch version of angle-wise loss of `Relational Knowledge
    Distillation. <https://arxiv.org/abs/1904.05068>`_.

    Args:
        loss_weight (float): Weight of angle-wise distillation loss.
            Defaults to 50.0.
        with_l2_norm (bool): Whether to normalize the model predictions before
            calculating the loss. Defaults to True.
    """

    def __init__(self, loss_weight=50.0, with_l2_norm=True):
        super(AngleWiseRKDNew, self).__init__()
        self.loss_weight = loss_weight
        self.with_l2_norm = with_l2_norm

    def angle_loss(self, preds_S, preds_T):
        """Calculate the angle-wise distillation loss."""
        angle_T = angle(preds_T)
        angle_S = angle(preds_S)
        return F.smooth_l1_loss(angle_S, angle_T)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
HIT-cwh/mmrazor
AngleWiseRKD
false
13,749
[ "Apache-2.0" ]
553
2dad24044d7f1dad88f20221f8fc071dd40fdd4f
https://github.com/HIT-cwh/mmrazor/tree/2dad24044d7f1dad88f20221f8fc071dd40fdd4f
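# Parity sketch for the record above: the three fused kernels mirror the eager
# pipeline (a squared-norm reduction, the normalized pairwise-difference
# tensor, and a smooth-L1 mean scaled by loss_weight=50). Assumes the
# definitions above are in scope and CUDA is available.
import torch

s = torch.rand(4, 4, 4, 4, device='cuda')  # student logits
t = torch.rand(4, 4, 4, 4, device='cuda')  # teacher logits
loss_ref = AngleWiseRKD()(s, t)     # eager: F.normalize + bmm Gram matrices
loss_opt = AngleWiseRKDNew()(s, t)  # Triton: same math, 3 fused kernels + bmm
torch.testing.assert_close(loss_opt, loss_ref)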
Mul
import torch
import torch as ch


class Mul(ch.nn.Module):

    def __init__(self, weight):
        super(Mul, self).__init__()
        self.weight = weight

    def forward(self, x):
        return x * self.weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'weight': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as ch

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MulNew(ch.nn.Module):

    def __init__(self, weight):
        super(MulNew, self).__init__()
        self.weight = weight

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Hadisalman/ffcv
Mul
false
13,750
[ "Apache-2.0" ]
1,969
64bd2b9e9c9fc3779ba13ef958ae479ecfac9c7f
https://github.com/Hadisalman/ffcv/tree/64bd2b9e9c9fc3779ba13ef958ae479ecfac9c7f
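# Note on the record above: the scalar passed at construction time is baked
# into the kernel as the literal tmp1 = 4.0, so the compiled module is only
# valid for weight=4. A quick check, assuming the classes above are in scope
# and CUDA is available:
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(MulNew(4)(x), Mul(4)(x))  # both compute x * 4
# MulNew(3)(x) would still multiply by 4: the constant is frozen in the kernel.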
Attention
import torch
import torch.nn.functional as F
from torch import nn


class Attention(nn.Module):

    def __init__(self, input_size, hidden_size):
        super(Attention, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 1)

    def softmax_mask(self, val, mask):
        rank_val = len(list(val.shape))
        rank_mask = len(list(mask.shape))
        if rank_val - rank_mask == 1:
            mask = torch.unsqueeze(mask, axis=-1)
        return (0 - 1e+30) * (1 - mask.float()) + val

    def forward(self, inputs, mask=None, keep_prob=1.0, is_train=True):
        x = torch.dropout(inputs, keep_prob, is_train)
        x = self.fc1(x)
        x = torch.tanh(x)
        x = self.fc2(x)
        if mask is not None:
            x = self.softmax_mask(x, mask)
        x = F.softmax(x, dim=1)
        x = x.squeeze(-1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.native_dropout.default(primals_1, 1.0, True)
        del primals_1
        buf1 = buf0[0]
        del buf0
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3)
        del primals_2
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(256)](buf4, primals_3, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_3
        buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf4, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_5
        buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf6, buf7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf6
        triton_poi_fused__softmax_2[grid(64)](buf7, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf7
    return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, buf8, primals_4)


class AttentionNew(nn.Module):

    def __init__(self, input_size, hidden_size):
        super(AttentionNew, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 1)

    def softmax_mask(self, val, mask):
        rank_val = len(list(val.shape))
        rank_mask = len(list(mask.shape))
        if rank_val - rank_mask == 1:
            mask = torch.unsqueeze(mask, axis=-1)
        return (0 - 1e+30) * (1 - mask.float()) + val

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
HIT-SCIR-xuanxuan/OpenKS
Attention
false
13,751
[ "Apache-2.0" ]
88
a7f2ce0890822113322aad22e98d6c961e63caef
https://github.com/HIT-SCIR-xuanxuan/OpenKS/tree/a7f2ce0890822113322aad22e98d6c961e63caef
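# Caveat on the record above: torch.dropout(input, p, train) takes a *drop*
# probability, so the keep_prob=1.0 default zeroes every activation in
# training mode, and call() bakes in exactly that native_dropout(..., 1.0,
# True). The masking helper is independent of this quirk; a small sketch of
# it, assuming the Attention class above is in scope (shapes are my choice):
import torch

attn = Attention(input_size=4, hidden_size=4)
scores = torch.randn(2, 3, 1)                # (batch, seq, 1) logits from fc2
mask = torch.tensor([[1, 1, 0], [1, 0, 0]])  # 1 = keep, 0 = padding
masked = attn.softmax_mask(scores, mask)     # pads pushed toward -1e30
weights = torch.softmax(masked, dim=1)       # pad positions get ~0 weight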
ConvMlp
import torch
import torch.nn as nn
import torch.utils.collect_env


class ConvMlp(nn.Module):
    """ MLP using 1x1 convs that keeps spatial dims
    """

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.ReLU, norm_layer=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1,
            bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1,
            bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.collect_env

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1


class ConvMlpNew(nn.Module):
    """ MLP using 1x1 convs that keeps spatial dims
    """

    def __init__(self, in_features, hidden_features=None, out_features=None,
        act_layer=nn.ReLU, norm_layer=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1,
            bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1,
            bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
HaotianUpenn/scatterbrain
ConvMlp
false
13,752
[ "Apache-2.0" ]
49
c026128d7362ae627641d11d4e5627bc1f400eb1
https://github.com/HaotianUpenn/scatterbrain/tree/c026128d7362ae627641d11d4e5627bc1f400eb1
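# In the record above inductor keeps both 1x1 convolutions as extern calls
# and only fuses the bias add (plus ReLU for fc1) into Triton kernels.
# Parity sketch under the same assumptions as before (classes in scope,
# CUDA device 0 present):
import torch

torch.manual_seed(0)
ref = ConvMlp(in_features=4).cuda()
opt = ConvMlpNew(in_features=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x))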
BasicBlock
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.conv2 = conv3x3(planes, planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
                expansion * planes, kernel_size=1, stride=stride))

    def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.1, inplace=True)
        out = self.conv2(out)
        out = out + self.shortcut(x)
        out = F.leaky_relu(out, 0.1, inplace=True)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_planes': 4, 'planes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1(
    in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = tmp4 > tmp5
    tmp7 = 0.1
    tmp8 = tmp4 * tmp7
    tmp9 = tl.where(tmp6, tmp4, tmp8)
    tmp10 = tmp9 > tmp5
    tl.store(in_out_ptr0 + x3, tmp9, xmask)
    tl.store(out_ptr0 + x3, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf1,
            primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1[grid
            (256)](buf3, primals_5, primals_3, buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1, buf4


def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1)


class BasicBlockNew(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlockNew, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.conv2 = conv3x3(planes, planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
                expansion * planes, kernel_size=1, stride=stride))

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
HappyBelief/ContraD
BasicBlock
false
13,753
[ "MIT" ]
168
abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
https://github.com/HappyBelief/ContraD/tree/abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
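# Besides the block output, call() above also materializes buf4, a boolean
# mask marking where the final leaky-ReLU was in its positive region; inductor
# keeps it for leaky_relu_backward even though forward only returns output[0].
# Forward parity sketch (classes in scope, CUDA available; with stride=1 and
# in_planes == planes the shortcut is the identity):
import torch

torch.manual_seed(0)
ref = BasicBlock(in_planes=4, planes=4).cuda()
opt = BasicBlockNew(in_planes=4, planes=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x))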
FullAttention
import math
import torch
import torch.nn as nn
import torch.utils.collect_env
from einops import rearrange  # rearrange was referenced but unimported in the
                              # flattened record; einops is assumed as its source


class FullAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_temp: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.1)
    """

    def __init__(self, softmax_temp=None, attention_dropout=0.0, device=
        None, dtype=None):
        super().__init__()
        self.softmax_temp = softmax_temp
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, query, key, value, attn_mask=None, key_padding_mask=
        None, need_weights=True):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            query: (B, T, H, E) The tensor containing the query
            key: (B, S, H, E) The tensor containing the key
            value: (B, S, H, D) The tensor containing the value
            attn_mask: An implementation of BaseMask that encodes where each
                       query can attend to
            key_padding_mask: An implementation of BaseMask that encodes how
                              many query each sequence in the batch consists of
        """
        _B, _T, _H, E = query.shape
        _, _S, _, _D = value.shape
        softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
        query = query * softmax_temp
        QK = torch.einsum('bthe,bshe->bhts', query, key)
        if attn_mask is not None and not attn_mask.all_ones:
            QK.masked_fill_(~attn_mask.bool_matrix, float('-inf'))
        if key_padding_mask is not None and not key_padding_mask.all_ones:
            QK.masked_fill_(rearrange(~key_padding_mask.bool_matrix,
                'b s -> b 1 1 s'), float('-inf'))
        attn = torch.softmax(QK, dim=-1)
        A = self.dropout(attn)
        output = torch.einsum('bhts,bshd->bthd', A, value)
        return output, attn if need_weights else None


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.collect_env

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x4, tmp2, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
    .constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 16
    y1 = yindex // 16
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tl.store(out_ptr0 + x4, tmp0, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        triton_poi_fused_clone_1[grid(64, 4)](arg2_1, buf1, 64, 4, XBLOCK=
            4, YBLOCK=32, num_warps=4, num_stages=1)
        del arg2_1
        buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf1
        triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused__softmax_3[grid(256)](buf3, buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
        del buf3
        triton_poi_fused_clone_4[grid(256)](arg1_1, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg1_1
        buf6 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
        del buf0
        extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
        del buf5
    return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 4, 16, 1), 0), buf4


class FullAttentionNew(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_temp: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.1)
    """

    def __init__(self, softmax_temp=None, attention_dropout=0.0, device=
        None, dtype=None):
        super().__init__()
        self.softmax_temp = softmax_temp
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1]
HaotianUpenn/scatterbrain
FullAttention
false
13,754
[ "Apache-2.0" ]
49
c026128d7362ae627641d11d4e5627bc1f400eb1
https://github.com/HaotianUpenn/scatterbrain/tree/c026128d7362ae627641d11d4e5627bc1f400eb1
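# The two einsums above are lowered to bmm after clone kernels that pre-scale
# the query by softmax_temp (1/sqrt(4) = 0.5 appears as the literal in
# triton_poi_fused_clone_0) and permute (B, T, H, E) into head-major layout.
# Equivalence sketch for the unmasked path (classes in scope, CUDA present):
import torch

q = torch.rand(4, 4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, 4, device='cuda')
out_ref, attn_ref = FullAttention()(q, k, v)
out_opt, attn_opt = FullAttentionNew()(q, k, v)
torch.testing.assert_close(out_opt, out_ref)
torch.testing.assert_close(attn_opt, attn_ref)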
BayesLinear
from torch.nn import Module
import math
import torch
from torch.nn import Parameter
import torch.nn.functional as F


class BayesLinear(Module):
    """
    Applies Bayesian Linear

    Arguments:
        prior_mu (Float): mean of prior normal distribution.
        prior_sigma (Float): sigma of prior normal distribution.

    .. note:: other arguments are following linear of pytorch 1.2.0.
    https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py
    """
    __constants__ = ['prior_mu', 'prior_sigma', 'bias', 'in_features',
        'out_features']

    def __init__(self, prior_mu, prior_sigma, in_features, out_features,
        bias=True):
        super(BayesLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.prior_mu = prior_mu
        self.prior_sigma = prior_sigma
        self.prior_log_sigma = math.log(prior_sigma)
        self.weight_mu = Parameter(torch.Tensor(out_features, in_features))
        self.weight_log_sigma = Parameter(torch.Tensor(out_features,
            in_features))
        self.register_buffer('weight_eps', None)
        if bias is None or bias is False:
            self.bias = False
        else:
            self.bias = True
        if self.bias:
            self.bias_mu = Parameter(torch.Tensor(out_features))
            self.bias_log_sigma = Parameter(torch.Tensor(out_features))
            self.register_buffer('bias_eps', None)
        else:
            self.register_parameter('bias_mu', None)
            self.register_parameter('bias_log_sigma', None)
            self.register_buffer('bias_eps', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight_mu.size(1))
        self.weight_mu.data.uniform_(-stdv, stdv)
        self.weight_log_sigma.data.fill_(self.prior_log_sigma)
        if self.bias:
            self.bias_mu.data.uniform_(-stdv, stdv)
            self.bias_log_sigma.data.fill_(self.prior_log_sigma)

    def freeze(self):
        self.weight_eps = torch.randn_like(self.weight_log_sigma)
        if self.bias:
            self.bias_eps = torch.randn_like(self.bias_log_sigma)

    def unfreeze(self):
        self.weight_eps = None
        if self.bias:
            self.bias_eps = None

    def forward(self, input):
        """
        Overriden.
        """
        if self.weight_eps is None:
            weight = self.weight_mu + torch.exp(self.weight_log_sigma
                ) * torch.randn_like(self.weight_log_sigma)
        else:
            weight = self.weight_mu + torch.exp(self.weight_log_sigma
                ) * self.weight_eps
        if self.bias:
            if self.bias_eps is None:
                bias = self.bias_mu + torch.exp(self.bias_log_sigma
                    ) * torch.randn_like(self.bias_log_sigma)
            else:
                bias = self.bias_mu + torch.exp(self.bias_log_sigma
                    ) * self.bias_eps
        else:
            bias = None
        return F.linear(input, weight, bias)

    def extra_repr(self):
        """
        Overriden.
        """
        return (
            'prior_mu={}, prior_sigma={}, in_features={}, out_features={}, bias={}'
            .format(self.prior_mu, self.prior_sigma, self.in_features,
            self.out_features, self.bias is not None))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'prior_mu': 4, 'prior_sigma': 4, 'in_features': 4,
        'out_features': 4}]
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import math
from torch.nn import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask)
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask)
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
            device=device(type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = torch.ops.aten.randn.default([4], dtype=torch.float32,
            device=device(type='cuda', index=0), pin_memory=False)
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_exp_mul_0[grid(16)](primals_1, primals_2,
            buf1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf5 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_add_exp_mul_1[grid(4)](primals_3, primals_4, buf3,
            buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
        del primals_3
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf5, reinterpret_tensor(primals_5, (64, 4),
            (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del buf4
        del buf5
    return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        primals_2, primals_4, buf1, buf3, reinterpret_tensor(primals_5,
        (64, 4), (4, 1), 0))


class BayesLinearNew(Module):
    """
    Applies Bayesian Linear

    Arguments:
        prior_mu (Float): mean of prior normal distribution.
        prior_sigma (Float): sigma of prior normal distribution.

    .. note:: other arguments are following linear of pytorch 1.2.0.
    https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py
    """
    __constants__ = ['prior_mu', 'prior_sigma', 'bias', 'in_features',
        'out_features']

    def __init__(self, prior_mu, prior_sigma, in_features, out_features,
        bias=True):
        super(BayesLinearNew, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.prior_mu = prior_mu
        self.prior_sigma = prior_sigma
        self.prior_log_sigma = math.log(prior_sigma)
        self.weight_mu = Parameter(torch.Tensor(out_features, in_features))
        self.weight_log_sigma = Parameter(torch.Tensor(out_features,
            in_features))
        self.register_buffer('weight_eps', None)
        if bias is None or bias is False:
            self.bias = False
        else:
            self.bias = True
        if self.bias:
            self.bias_mu = Parameter(torch.Tensor(out_features))
            self.bias_log_sigma = Parameter(torch.Tensor(out_features))
            self.register_buffer('bias_eps', None)
        else:
            self.register_parameter('bias_mu', None)
            self.register_parameter('bias_log_sigma', None)
            self.register_buffer('bias_eps', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight_mu.size(1))
        self.weight_mu.data.uniform_(-stdv, stdv)
        self.weight_log_sigma.data.fill_(self.prior_log_sigma)
        if self.bias:
            self.bias_mu.data.uniform_(-stdv, stdv)
            self.bias_log_sigma.data.fill_(self.prior_log_sigma)

    def freeze(self):
        self.weight_eps = torch.randn_like(self.weight_log_sigma)
        if self.bias:
            self.bias_eps = torch.randn_like(self.bias_log_sigma)

    def unfreeze(self):
        self.weight_eps = None
        if self.bias:
            self.bias_eps = None

    def extra_repr(self):
        """
        Overriden.
        """
        return (
            'prior_mu={}, prior_sigma={}, in_features={}, out_features={}, bias={}'
            .format(self.prior_mu, self.prior_sigma, self.in_features,
            self.out_features, self.bias is not None))

    def forward(self, input_0):
        primals_1 = self.weight_mu
        primals_2 = self.weight_log_sigma
        primals_3 = self.bias_mu
        primals_4 = self.bias_log_sigma
        primals_5 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Harry24k/bayesian-neural-network-pytorch
BayesLinear
false
13,755
[ "MIT" ]
178
d2272f09e0d08c1abe1f53ce6df56b31494d7020
https://github.com/Harry24k/bayesian-neural-network-pytorch/tree/d2272f09e0d08c1abe1f53ce6df56b31494d7020
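# Behavioural note on the record above: call() draws fresh randn noise on
# every invocation, so BayesLinearNew ignores the weight_eps/bias_eps buffers
# and freeze() has no effect on it; outputs stay stochastic and will not match
# the eager module sample-for-sample. Shape-level sketch (class in scope,
# CUDA available; init values taken from the record's get_init_inputs()):
import torch

layer = BayesLinearNew(prior_mu=4, prior_sigma=4, in_features=4,
    out_features=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y1, y2 = layer(x), layer(x)
print(y1.shape)             # torch.Size([4, 4, 4, 4])
print(torch.equal(y1, y2))  # False: eps is resampled inside call()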
ResidualAttentionBlock
import torch
from torch import nn
from collections import OrderedDict


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: 'torch.Tensor'):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):

    def forward(self, x: 'torch.Tensor'):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):

    def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
        'torch.Tensor'=None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model,
            d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear(
            d_model * 4, d_model))]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: 'torch.Tensor'):
        self.attn_mask = self.attn_mask if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: 'torch.Tensor'):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'n_head': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from collections import OrderedDict

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp7 = tmp6 == tmp1
    tmp8 = tmp7 == 0
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tmp9 != 0
    tmp11 = tmp5 | tmp10
    tmp13 = tmp12 == tmp1
    tmp14 = tmp13 == 0
    tmp15 = tmp14.to(tl.int64)
    tmp16 = tmp15 != 0
    tmp17 = tmp11 | tmp16
    tmp19 = tmp18 == tmp1
    tmp20 = tmp19 == 0
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp21 != 0
    tmp23 = tmp17 | tmp22
    tmp24 = tmp23 == 0
    tmp28 = tmp26 + tmp27
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp33 = tmp25 / tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp24, tmp34, tmp33)
    tl.store(out_ptr0 + x2, tmp35, xmask)


@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
    .constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.702
    tmp2 = tmp0 * tmp1
    tmp3 = tl.sigmoid(tmp2)
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_out_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tl.store(in_out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (12,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (16, 4), (4, 1))
    assert_size_stride(primals_11, (16,), (1,))
    assert_size_stride(primals_12, (4, 16), (16, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0,
            buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
            buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del primals_2
        del primals_3
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1,
            4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1,
            4), 16), out=buf4)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
            buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf5)
        buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0)
        del buf3
        triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0)
        del buf4
        triton_poi_fused_mul_3[grid(16)](buf7, primals_5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_5
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0),
            0), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8)
        buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__safe_softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__safe_softmax_5[grid(64)](buf8, buf9, buf10, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11)
        buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32)
        triton_poi_fused_clone_6[grid(4, 4)](buf11, buf12, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0)
        del buf11
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf13)
        del primals_7
        buf14 = buf1
        del buf1
        buf15 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_7[grid(4)](primals_1, buf13,
            buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_1,
            buf13, buf14, buf15, primals_8, primals_9, buf16, 16, XBLOCK=
            16, num_warps=1, num_stages=1)
        del buf14
        del buf15
        del primals_9
        buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0)
        del buf9
        extern_kernels.addmm(primals_11, buf16, reinterpret_tensor(
            primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17)
        del primals_11
        buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0)
        del buf8
        triton_poi_fused_mul_sigmoid_9[grid(64)](buf17, buf18, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4),
            (1, 16), 0), out=buf19)
        buf20 = buf19
        del buf19
        triton_poi_fused_add_10[grid(16)](buf20, primals_1, buf13,
            primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_13
    return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor(
        buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12,
        primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1,
        4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0),
        reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0),
        reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
        reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
        reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: 'torch.Tensor'):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):

    def forward(self, x: 'torch.Tensor'):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlockNew(nn.Module):

    def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
        'torch.Tensor'=None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model,
            d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear(
            d_model * 4, d_model))]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: 'torch.Tensor'):
        self.attn_mask = self.attn_mask if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, input_0):
        primals_4 = self.attn.in_proj_weight
        primals_5 = self.attn.in_proj_bias
        primals_1 = self.attn.out_proj.weight
        primals_2 = self.attn.out_proj.bias
        primals_3 = self.ln_1.weight
        primals_7 = self.ln_1.bias
        primals_10 = self.mlp.c_fc.weight
        primals_11 = self.mlp.c_fc.bias
        primals_12 = self.mlp.c_proj.weight
        primals_8 = self.mlp.c_proj.bias
        primals_9 = self.ln_2.weight
        primals_13 = self.ln_2.bias
        primals_6 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
HIT-SCIR-xuanxuan/OpenKS
ResidualAttentionBlock
false
13,756
[ "Apache-2.0" ]
88
a7f2ce0890822113322aad22e98d6c961e63caef
https://github.com/HIT-SCIR-xuanxuan/OpenKS/tree/a7f2ce0890822113322aad22e98d6c961e63caef
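A note on the QuickGELU used in the row above: x * torch.sigmoid(1.702 * x) is a cheap approximation of exact GELU (the 1.702 constant appears verbatim in the fused kernel). A minimal self-contained sketch, plain CPU torch, of how close the two are:

import torch
import torch.nn.functional as F

x = torch.linspace(-5.0, 5.0, 1001)
quick = x * torch.sigmoid(1.702 * x)   # QuickGELU, matching the kernel's 1.702
exact = F.gelu(x)                      # erf-based GELU
print((quick - exact).abs().max())     # worst-case gap is on the order of 1e-2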
EqualLinear
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias_init=0, lr_mul=1, activation=None ): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) self.bias = nn.Parameter(torch.zeros(out_dim)) self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul self.bias_init = bias_init def forward(self, input): bias = self.bias * self.lr_mul + self.bias_init if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, bias) else: out = F.linear(input, self.weight * self.scale, bias=bias) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim, bias_init=0, lr_mul=1, activation=None ): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) self.bias = nn.Parameter(torch.zeros(out_dim)) self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul self.bias_init = bias_init def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
HappyBelief/ContraD
EqualLinear
false
13,757
[ "MIT" ]
168
abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
https://github.com/HappyBelief/ContraD/tree/abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
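Usage sketch for the EqualLinear row above (assumes the EqualLinear class from the row is in scope): the equalized-learning-rate trick stores an unscaled weight and multiplies it by scale = 1/sqrt(in_dim) * lr_mul on every forward, so with the defaults lr_mul=1 and bias_init=0 the layer is an ordinary linear map with a rescaled weight:

import math
import torch
import torch.nn.functional as F

lin = EqualLinear(4, 4)                 # class defined in the row above
x = torch.rand(4, 4)
# effective weight = stored weight * (1 / sqrt(in_dim)); bias passes through
ref = F.linear(x, lin.weight * (1 / math.sqrt(4)), lin.bias)
assert torch.allclose(lin(x), ref)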
GELU
import torch import torch.nn.functional as F import torch.utils.model_zoo import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class GELU(torch.nn.Module): def forward(self, x): return F.gelu(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.model_zoo import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GELUNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
HelenR6/imagenet-r
GELU
false
13,758
[ "MIT" ]
155
0bf04f2bf5d60d1098fc9a78f4e8c042e434eb69
https://github.com/HelenR6/imagenet-r/tree/0bf04f2bf5d60d1098fc9a78f4e8c042e434eb69
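The fused kernel in this row hard-codes exact (erf-based) GELU: the constants 0.5 and 0.7071067811865476 come from 0.5 * x * (1 + erf(x / sqrt(2))). A self-contained check against F.gelu:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))  # x / sqrt(2)
assert torch.allclose(manual, F.gelu(x), atol=1e-6)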
ConvBnRel
import torch from torch.autograd.gradcheck import * import torch.nn as nn import torch.nn class ConvBnRel(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False, bn=False, reverse=False, bias=False): super(ConvBnRel, self).__init__() padding = int((kernel_size - 1) // 2) if same_padding else 0 if not reverse: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) else: self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None if active_unit == 'relu': self.active_unit = nn.ReLU(inplace=True) elif active_unit == 'elu': self.active_unit = nn.ELU(inplace=True) else: self.active_unit = None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.active_unit is not None: x = self.active_unit(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.autograd.gradcheck import * import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, primals_1, primals_2, buf2 class ConvBnRelNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False, bn=False, reverse=False, bias=False): super(ConvBnRelNew, self).__init__() padding = int((kernel_size - 1) // 2) if same_padding else 0 if not reverse: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) else: self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None if active_unit == 'relu': self.active_unit = nn.ReLU(inplace=True) elif active_unit == 'elu': self.active_unit = nn.ELU(inplace=True) else: self.active_unit = None def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
HastingsGreer/mermaid
ConvBnRel
false
13,759
[ "Apache-2.0" ]
120
bd13c5fc427eb8cd9054973a8eaaeb302078182d
https://github.com/HastingsGreer/mermaid/tree/bd13c5fc427eb8cd9054973a8eaaeb302078182d
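Shape sketch for ConvBnRel (assumes the class from the row above is in scope): same_padding uses padding = (kernel_size - 1) // 2, which preserves spatial size at stride 1 only for odd kernels; with an even kernel such as this row's k=4 the output still shrinks by one pixel per dimension:

import torch

m = ConvBnRel(4, 8, kernel_size=3, same_padding=True)   # odd kernel: size preserved
print(m(torch.rand(1, 4, 16, 16)).shape)                # torch.Size([1, 8, 16, 16])
m4 = ConvBnRel(4, 8, kernel_size=4, same_padding=True)  # even kernel: 16 + 2*1 - 4 + 1 = 15
print(m4(torch.rand(1, 4, 16, 16)).shape)               # torch.Size([1, 8, 15, 15])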
HLoss
import torch from torch.autograd.gradcheck import * import torch.nn as nn import torch.nn class HLoss(nn.Module): def __init__(self): super(HLoss, self).__init__() def forward(self, x, spacing): volumeElement = spacing.prod() b = x * torch.log(x) b = -1.0 * b.sum() * volumeElement return b def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.autograd.gradcheck import * import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log_mul_prod_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(triton_helpers.prod(tmp7, 0)) tmp10 = -1.0 tmp11 = tmp5 * tmp10 tmp12 = tmp11 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_log_mul_prod_sum_0[grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class HLossNew(nn.Module): def __init__(self): super(HLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
HastingsGreer/mermaid
HLoss
false
13,760
[ "Apache-2.0" ]
120
bd13c5fc427eb8cd9054973a8eaaeb302078182d
https://github.com/HastingsGreer/mermaid/tree/bd13c5fc427eb8cd9054973a8eaaeb302078182d
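HLoss above is an (unnormalized) entropy term: -sum(x * log x) scaled by the voxel volume prod(spacing), which is exactly what the single fused reduction kernel computes. A self-contained check (assumes the HLoss class from the row):

import torch

x = torch.rand(4, 4, 4, 4).clamp_min(1e-8)   # keep log(x) finite
spacing = torch.tensor([0.5, 0.5])
manual = -1.0 * (x * torch.log(x)).sum() * spacing.prod()
assert torch.allclose(HLoss()(x, spacing), manual)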
TinyDiscriminator
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class TinyDiscriminator(nn.Module): def __init__(self, n_features, n_classes=1, d_hidden=128): super(TinyDiscriminator, self).__init__() self.n_features = n_features self.n_classes = n_classes self.d_hidden = d_hidden self.l1 = nn.Linear(n_features, d_hidden) self.l2 = nn.Linear(d_hidden, 1) if n_classes > 1: self.linear_y = nn.Embedding(n_classes, d_hidden) def forward(self, inputs, y=None): output = self.l1(inputs) features = F.leaky_relu(output, 0.1, inplace=True) d = self.l2(features) if y is not None: w_y = self.linear_y(y) d = d + (features * w_y).sum(1, keepdim=True) return d def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x4, tmp7, None) tl.store(out_ptr0 + x4, tmp8, None) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 512 * (x1 % 4 // 4) + 2048 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), None) tl.store(out_ptr0 + x2, tmp0, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 128), (128, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(8192)](buf1, primals_2, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) triton_poi_fused_view_1[grid(8192)](buf1, buf2, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_5 return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, primals_4, buf5 class TinyDiscriminatorNew(nn.Module): def __init__(self, n_features, n_classes=1, d_hidden=128): super(TinyDiscriminatorNew, self).__init__() self.n_features = n_features self.n_classes = n_classes self.d_hidden = d_hidden self.l1 = nn.Linear(n_features, d_hidden) self.l2 = nn.Linear(d_hidden, 1) if n_classes > 1: self.linear_y = nn.Embedding(n_classes, d_hidden) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
HappyBelief/ContraD
TinyDiscriminator
false
13,761
[ "MIT" ]
168
abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
https://github.com/HappyBelief/ContraD/tree/abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
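The Triton path in this row only covers the unconditional branch (y=None). With n_classes > 1 the eager module adds a projection-discriminator term, (features * embedding(y)).sum(1, keepdim=True). A usage sketch of both branches (assumes the TinyDiscriminator class from the row):

import torch

d = TinyDiscriminator(n_features=4, n_classes=10)
x = torch.rand(8, 4)
y = torch.randint(0, 10, (8,))
print(d(x).shape)      # torch.Size([8, 1]) -- unconditional branch
print(d(x, y).shape)   # torch.Size([8, 1]) -- with the class projection added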
UpBlock
import torch import torch.cuda import torch.nn as nn class UpBlock(nn.Module): def __init__(self, in_, out, scale): super().__init__() self.up_conv = nn.Conv2d(in_, out, 1) self.upsample = nn.UpsamplingNearest2d(scale_factor=scale) def forward(self, x): return self.upsample(self.up_conv(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_': 4, 'out': 4, 'scale': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.cuda import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_arange_mul_0(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x5 = xindex // 16 x2 = xindex // 16 % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + x6, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_add_arange_mul_0[grid(4)](buf1, 4, XBLOCK =4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_1[grid(256)](buf1, buf0, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf2, primals_1, primals_3, buf1 class UpBlockNew(nn.Module): def __init__(self, in_, out, scale): super().__init__() self.up_conv = nn.Conv2d(in_, out, 1) self.upsample = nn.UpsamplingNearest2d(scale_factor=scale) def forward(self, input_0): primals_1 = self.up_conv.weight primals_2 = self.up_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
HalfLemon/kaggle-dstl
UpBlock
false
13,762
[ "MIT" ]
218
b1d3a518bbbd3503bdf07400841183d2386fd158
https://github.com/HalfLemon/kaggle-dstl/tree/b1d3a518bbbd3503bdf07400841183d2386fd158
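Note that this row instantiates UpBlock with scale=1.0, so the nearest-neighbour upsample is a no-op and only the 1x1 conv remains (the index kernel above just multiplies by 1.0). A usage sketch with a non-trivial scale (assumes the UpBlock class from the row):

import torch

up = UpBlock(4, 8, scale=2)
print(up(torch.rand(1, 4, 16, 16)).shape)  # torch.Size([1, 8, 32, 32])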
Norm
import torch from torch import nn class Norm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.size = d_model self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim =-1, keepdim=True) + self.eps) + self.bias return norm def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class NormNew(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.size = d_model self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, input_0): primals_1 = self.alpha primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
HebatallaTarek/Empathy-Mental-Health
Norm
false
13,763
[ "BSD-3-Clause" ]
66
16e2a5f93aabd22803bb39805f8e76c8bea0ccf2
https://github.com/HebatallaTarek/Empathy-Mental-Health/tree/16e2a5f93aabd22803bb39805f8e76c8bea0ccf2
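This Norm is close to, but not identical to, nn.LayerNorm: x.std() is Bessel-corrected (it divides the squared deviations by d_model - 1, visible as the constant 3.0 in the fused kernel), while LayerNorm uses the biased variance. At init (alpha=1, bias=0) the module reduces to the plain formula (assumes the Norm class from the row):

import torch

x = torch.rand(4, 4, 4, 4)
manual = (x - x.mean(-1, keepdim=True)) / (x.std(-1, keepdim=True) + 1e-06)
assert torch.allclose(Norm(4)(x), manual)   # x.std is unbiased by default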
GRUCell
import torch import numpy as np import torch.nn.functional as F import torch.utils.data import torch.nn as nn class GRUCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(GRUCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias) self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias) self.fc_in = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias) self.init_parameters() def init_parameters(self): std = 1.0 / np.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, h): x = x.view(-1, x.shape[1]) i_r = self.fc_ir(x) h_r = self.fc_hr(h) i_z = self.fc_iz(x) h_z = self.fc_hz(h) i_n = self.fc_in(x) h_n = self.fc_hn(h) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_z + h_z) newgate = F.tanh(i_n + resetgate * h_n) hy = newgate + inputgate * (h - newgate) return hy def get_inputs(): return [torch.rand([4, 4, 64, 4]), torch.rand([4, 4, 1024, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4096 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, None) tmp5 = tl.load(in_ptr3 + x2, None) tmp9 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr5 + x2, None) tmp13 = tl.load(in_ptr6 + x2, None) tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = libdevice.tanh(tmp7) tmp11 = tmp9 + tmp10 tmp12 = tl.sigmoid(tmp11) tmp14 = tmp13 - tmp8 tmp15 = tmp12 * tmp14 tmp16 = tmp8 + tmp15 tl.store(out_ptr0 + x2, tmp16, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4, 64, 4), (1024, 256, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1024, 4), (16384, 4096, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (1024, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16384, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, ( 16384, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((1024, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_1, (1024, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16384, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(primals_6, ( 16384, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_10 del primals_9 buf4 = empty_strided_cuda((1024, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(primals_1, ( 1024, 4), (4, 1), 0), 
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_11 del primals_12 buf5 = empty_strided_cuda((16384, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, ( 16384, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), ( 1, 4), 0), alpha=1, beta=1, out=buf5) del primals_13 del primals_14 buf6 = empty_strided_cuda((4, 4, 1024, 4), (16384, 4096, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(65536)](buf4, buf0, buf1, buf5, buf2, buf3, primals_6, buf6, 65536, XBLOCK=256, num_warps=4, num_stages=1) return buf6, primals_6, reinterpret_tensor(primals_1, (1024, 4), (4, 1), 0 ), buf0, buf1, buf2, buf3, buf4, buf5 class GRUCellNew(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(GRUCellNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias) self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias) self.fc_in = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias) self.init_parameters() def init_parameters(self): std = 1.0 / np.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, input_0, input_1): primals_2 = self.fc_ir.weight primals_3 = self.fc_ir.bias primals_4 = self.fc_hr.weight primals_5 = self.fc_hr.bias primals_7 = self.fc_iz.weight primals_8 = self.fc_iz.bias primals_9 = self.fc_hz.weight primals_10 = self.fc_hz.bias primals_11 = self.fc_in.weight primals_12 = self.fc_in.bias primals_13 = self.fc_hn.weight primals_14 = self.fc_hn.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
H4LL/PyGrid
GRUCell
false
13,764
[ "Apache-2.0" ]
69
62d5ba6f207498ca365c12ac59dbcd11c1337881
https://github.com/H4LL/PyGrid/tree/62d5ba6f207498ca365c12ac59dbcd11c1337881
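The blend hy = newgate + inputgate * (h - newgate) in this row is algebraically the standard GRU update (1 - z) * n + z * h, which is why the fused kernel can compute it with one extra sub/mul/add after the two sigmoids and the tanh. A self-contained check of the identity:

import torch

n, z, h = torch.rand(3, 5), torch.rand(3, 5), torch.rand(3, 5)
assert torch.allclose(n + z * (h - n), (1 - z) * n + z * h)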
LastLevelMaxPool
import torch import torch.utils.data from torch import nn from torch.nn import functional as F class LastLevelMaxPool(nn.Module): def forward(self, x): return [F.max_pool2d(x, 1, 2, 0)] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class LastLevelMaxPoolNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BorisLestsov/retinamask
LastLevelMaxPool
false
13,765
[ "MIT" ]
706
265a65f018c64220bcea946d306fc7b07a692b16
https://github.com/BorisLestsov/retinamask/tree/265a65f018c64220bcea946d306fc7b07a692b16
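max_pool2d with a 1x1 window and stride 2 is plain subsampling, which is exactly what the gather kernel above does (it loads in_ptr0 + 2 * x0 + 8 * x1 and never takes a max). A self-contained check (assumes the LastLevelMaxPool class from the row):

import torch

x = torch.rand(4, 4, 4, 4)
out = LastLevelMaxPool()(x)[0]
assert torch.equal(out, x[:, :, ::2, ::2])   # every other row and column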
AdaptiveConcatPool2d
import torch from torch import nn from torchvision.models import * class AdaptiveConcatPool2d(nn.Module): def __init__(self, sz=None): super().__init__() sz = sz or (1, 1) self.ap = nn.AdaptiveAvgPool2d(sz) self.mp = nn.AdaptiveMaxPool2d(sz) def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torchvision.models import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask) @triton.jit def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. 
constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32) buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4) triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf3, class AdaptiveConcatPool2dNew(nn.Module): def __init__(self, sz=None): super().__init__() sz = sz or (1, 1) self.ap = nn.AdaptiveAvgPool2d(sz) self.mp = nn.AdaptiveMaxPool2d(sz) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ArcGIS/raster-deep-learning
AdaptiveConcatPool2d
false
13,766
[ "Apache-2.0" ]
154
0af006d70c605707bab2bb11ae6393fd65ce8820
https://github.com/ArcGIS/raster-deep-learning/tree/0af006d70c605707bab2bb11ae6393fd65ce8820
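AdaptiveConcatPool2d doubles the channel count: max-pooled features come first in the concat, average-pooled second, matching the two kernels above (one running maximum, one mean reduction). A usage sketch (assumes the class from the row):

import torch

pool = AdaptiveConcatPool2d()
x = torch.rand(4, 4, 4, 4)
out = pool(x)
print(out.shape)   # torch.Size([4, 8, 1, 1])
assert torch.equal(out[:, :4], torch.amax(x, dim=(2, 3), keepdim=True))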
UPChannelRPN
import torch import torch.nn.functional as F import torch.nn as nn def xcorr_fast(x, kernel): """group conv2d to calculate cross correlation, fast version """ batch = kernel.size()[0] pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3]) px = x.view(1, -1, x.size()[2], x.size()[3]) po = F.conv2d(px, pk, groups=batch) po = po.view(batch, -1, po.size()[2], po.size()[3]) return po class RPN(nn.Module): def __init__(self): super(RPN, self).__init__() def forward(self, z_f, x_f): raise NotImplementedError class UPChannelRPN(RPN): def __init__(self, anchor_num=5, feature_in=256): super(UPChannelRPN, self).__init__() cls_output = 2 * anchor_num loc_output = 4 * anchor_num self.template_cls_conv = nn.Conv2d(feature_in, feature_in * cls_output, kernel_size=3) self.template_loc_conv = nn.Conv2d(feature_in, feature_in * loc_output, kernel_size=3) self.search_cls_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3) self.search_loc_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3) self.loc_adjust = nn.Conv2d(loc_output, loc_output, kernel_size=1) def forward(self, z_f, x_f): cls_kernel = self.template_cls_conv(z_f) loc_kernel = self.template_loc_conv(z_f) cls_feature = self.search_cls_conv(x_f) loc_feature = self.search_loc_conv(x_f) cls = xcorr_fast(cls_feature, cls_kernel) loc = self.loc_adjust(xcorr_fast(loc_feature, loc_kernel)) return cls, loc def get_inputs(): return [torch.rand([4, 256, 64, 64]), torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x1 = xindex // 3844 % 2560 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3844 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_view_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x1 = xindex // 3844 % 5120 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (2560, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_2, (2560,), (1,)) assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_4, (5120, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (5120,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (20, 20, 1, 1), (20, 1, 1, 1)) assert_size_stride(primals_12, (20,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2560, 62, 62), (9840640, 3844, 62, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 5120, 62, 62), (19681280, 3844, 62, 1)) buf2 = extern_kernels.convolution(primals_8, primals_6, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 62, 62), (984064, 3844, 62, 1)) buf3 = extern_kernels.convolution(primals_8, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 62, 62), (984064, 3844, 62, 1)) buf4 = buf0 del buf0 buf5 = reinterpret_tensor(buf4, (40, 256, 62, 62), (984064, 3844, 62, 1), 0) del buf4 get_raw_stream(0) triton_poi_fused_convolution_view_0[grid(39362560)](buf5, primals_2, 39362560, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf6 = buf2 del buf2 triton_poi_fused_convolution_1[grid(3936256)](buf6, primals_7, 3936256, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 1024, 62, 62), (0, 3844, 62, 1), 0), buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf7, (1, 40, 1, 1), (40, 1, 1, 1)) buf8 = buf1 del buf1 buf9 = reinterpret_tensor(buf8, (80, 256, 62, 62), (984064, 3844, 62, 1), 0) del buf8 triton_poi_fused_convolution_view_2[grid(78725120)](buf9, primals_5, 78725120, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf10 = buf3 del buf3 triton_poi_fused_convolution_1[grid(3936256)](buf10, primals_10, 3936256, XBLOCK=512, num_warps=8, num_stages=1) del primals_10 buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (1, 1024, 62, 62), (0, 3844, 62, 1), 0), buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf11, (1, 80, 1, 1), (80, 1, 1, 1)) buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 20, 1, 1), (20, 1, 1, 1), 0), primals_11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 20, 1, 1), (20, 1, 1, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_3[grid(80)](buf13, primals_12, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_12 return (reinterpret_tensor(buf7, (4, 10, 1, 1), (10, 1, 1, 1), 0), buf13, primals_1, primals_3, primals_4, primals_6, primals_8, primals_9, primals_11, buf5, reinterpret_tensor(buf6, (1, 1024, 62, 62), (3936256, 3844, 62, 1), 0), buf9, reinterpret_tensor(buf10, (1, 1024, 62, 62), (3936256, 3844, 62, 1), 0), reinterpret_tensor(buf11, (4, 20, 1, 1), (20, 1, 1, 1), 0)) def xcorr_fast(x, kernel): """group conv2d to calculate cross correlation, fast version """ batch = kernel.size()[0] pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3]) px = x.view(1, -1, x.size()[2], x.size()[3]) po = F.conv2d(px, pk, groups=batch) po = po.view(batch, -1, po.size()[2], po.size()[3]) return po class RPN(nn.Module): def __init__(self): super(RPN, self).__init__() def forward(self, z_f, x_f): raise NotImplementedError class UPChannelRPNNew(RPN): def __init__(self, anchor_num=5, feature_in=256): super(UPChannelRPNNew, self).__init__() cls_output = 2 * anchor_num loc_output = 4 * anchor_num self.template_cls_conv = nn.Conv2d(feature_in, feature_in * cls_output, kernel_size=3) self.template_loc_conv = nn.Conv2d(feature_in, feature_in * loc_output, kernel_size=3) self.search_cls_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3) self.search_loc_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3) self.loc_adjust = nn.Conv2d(loc_output, loc_output, kernel_size=1) def forward(self, input_0, 
input_1): primals_1 = self.template_cls_conv.weight primals_2 = self.template_cls_conv.bias primals_4 = self.template_loc_conv.weight primals_5 = self.template_loc_conv.bias primals_6 = self.search_cls_conv.weight primals_7 = self.search_cls_conv.bias primals_9 = self.search_loc_conv.weight primals_10 = self.search_loc_conv.bias primals_11 = self.loc_adjust.weight primals_12 = self.loc_adjust.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
DansYU/pysot
UPChannelRPN
false
13,767
[ "Apache-2.0" ]
4,318
3a43faccbba0280ef499736c82fd195f9c38373d
https://github.com/DansYU/pysot/tree/3a43faccbba0280ef499736c82fd195f9c38373d
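The xcorr_fast trick in this row folds the batch dimension into channels and uses groups=batch, so one conv2d call performs a separate cross-correlation per sample. A self-contained sketch of the equivalence with a per-sample loop:

import torch
import torch.nn.functional as F

x = torch.rand(2, 3, 8, 8)    # per-sample search features
k = torch.rand(2, 3, 3, 3)    # one correlation kernel per sample
fast = F.conv2d(x.view(1, -1, 8, 8), k.view(-1, 3, 3, 3), groups=2)
fast = fast.view(2, 1, *fast.shape[-2:])
slow = torch.stack([F.conv2d(x[i:i + 1], k[i:i + 1]) for i in range(2)]).squeeze(1)
assert torch.allclose(fast, slow, atol=1e-6)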
ModulatedConv2d
from torch.autograd import Function import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias_init=0, lr_mul=1, activation=None ): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) self.bias = nn.Parameter(torch.zeros(out_dim)) self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul self.bias_init = bias_init def forward(self, input): bias = self.bias * self.lr_mul + self.bias_init if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, bias) else: out = F.linear(input, self.weight * self.scale, bias=bias) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample})' ) def forward(self, input, 
style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) input = input.view(1, batch * in_channel, height, width) if self.upsample: weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) else: out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 + tmp1 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), 
reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias_init=0, lr_mul=1, activation=None ): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) self.bias = nn.Parameter(torch.zeros(out_dim)) self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul self.bias_init = bias_init def forward(self, input): bias = self.bias * self.lr_mul + self.bias_init if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, bias) else: out = F.linear(input, self.weight * self.scale, bias=bias) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample})' ) def forward(self, 
input_0, input_1): primals_5 = self.weight primals_3 = self.modulation.weight primals_2 = self.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
HappyBelief/ContraD
ModulatedConv2d
false
13768
[ "MIT" ]
168
abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
https://github.com/HappyBelief/ContraD/tree/abb72562ddac8d8ab37fe9af6ac4c44c61e8ea0f
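A quick smoke test of the record above (a sketch, not part of the dataset; assumes the eager ModulatedConv2d is importable), using the get_inputs/get_init_inputs fixtures:

# Output spatial size is 4 + 2*padding - kernel_size + 1 = 5, since padding = kernel_size // 2 = 2.
import torch

conv = ModulatedConv2d(in_channel=4, out_channel=4, kernel_size=4, style_dim=4)
x, style = torch.rand(4, 4, 4, 4), torch.rand(4, 4)
out = conv(x, style)
print(out.shape)   # torch.Size([4, 4, 5, 5]), matching the (1, 16, 5, 5) buf6 reshape above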
BayesConv2d
from torch.nn import Module import math import torch from torch.nn import Parameter import torch.nn.functional as F from torch.nn.modules.utils import _pair class _BayesConvNd(Module): """ Applies Bayesian Convolution Arguments: prior_mu (Float): mean of prior normal distribution. prior_sigma (Float): sigma of prior normal distribution. .. note:: other arguments are following conv of pytorch 1.2.0. https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py """ __constants__ = ['prior_mu', 'prior_sigma', 'stride', 'padding', 'dilation', 'groups', 'bias', 'padding_mode', 'output_padding', 'in_channels', 'out_channels', 'kernel_size'] def __init__(self, prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias, padding_mode): super(_BayesConvNd, self).__init__() if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.transposed = transposed self.output_padding = output_padding self.groups = groups self.padding_mode = padding_mode self.prior_mu = prior_mu self.prior_sigma = prior_sigma self.prior_log_sigma = math.log(prior_sigma) if transposed: self.weight_mu = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size)) self.weight_log_sigma = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size)) self.register_buffer('weight_eps', None) else: self.weight_mu = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size)) self.weight_log_sigma = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size)) self.register_buffer('weight_eps', None) if bias is None or bias is False: self.bias = False else: self.bias = True if self.bias: self.bias_mu = Parameter(torch.Tensor(out_channels)) self.bias_log_sigma = Parameter(torch.Tensor(out_channels)) self.register_buffer('bias_eps', None) else: self.register_parameter('bias_mu', None) self.register_parameter('bias_log_sigma', None) self.register_buffer('bias_eps', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels n *= self.kernel_size[0] ** 2 stdv = 1.0 / math.sqrt(n) self.weight_mu.data.uniform_(-stdv, stdv) self.weight_log_sigma.data.fill_(self.prior_log_sigma) if self.bias: self.bias_mu.data.uniform_(-stdv, stdv) self.bias_log_sigma.data.fill_(self.prior_log_sigma) def freeze(self): self.weight_eps = torch.randn_like(self.weight_log_sigma) if self.bias: self.bias_eps = torch.randn_like(self.bias_log_sigma) def unfreeze(self): self.weight_eps = None if self.bias: self.bias_eps = None def extra_repr(self): s = ( '{prior_mu}, {prior_sigma}, {in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}' ) if self.padding != (0,) * len(self.padding): s += ', padding={padding}' if self.dilation != (1,) * len(self.dilation): s += ', dilation={dilation}' if self.output_padding != (0,) * len(self.output_padding): s += ', output_padding={output_padding}' if self.groups != 1: s += ', groups={groups}' if self.bias is False: s += ', bias=False' return s.format(**self.__dict__) def __setstate__(self, state): super(_BayesConvNd, self).__setstate__(state) if not hasattr(self, 'padding_mode'): self.padding_mode = 'zeros' class BayesConv2d(_BayesConvNd): """ Applies Bayesian Convolution for 
2D inputs Arguments: prior_mu (Float): mean of prior normal distribution. prior_sigma (Float): sigma of prior normal distribution. .. note:: other arguments are following conv of pytorch 1.2.0. https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py """ def __init__(self, prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(BayesConv2d, self).__init__(prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode) def conv2d_forward(self, input, weight): if self.bias: if self.bias_eps is None: bias = self.bias_mu + torch.exp(self.bias_log_sigma ) * torch.randn_like(self.bias_log_sigma) else: bias = self.bias_mu + torch.exp(self.bias_log_sigma ) * self.bias_eps else: bias = None if self.padding_mode == 'circular': expanded_padding = (self.padding[1] + 1) // 2, self.padding[1 ] // 2, (self.padding[0] + 1) // 2, self.padding[0] // 2 return F.conv2d(F.pad(input, expanded_padding, mode='circular'), weight, bias, self.stride, _pair(0), self.dilation, self.groups ) return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) def forward(self, input): """ Overriden. """ if self.weight_eps is None: weight = self.weight_mu + torch.exp(self.weight_log_sigma ) * torch.randn_like(self.weight_log_sigma) else: weight = self.weight_mu + torch.exp(self.weight_log_sigma ) * self.weight_eps return self.conv2d_forward(input, weight) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'prior_mu': 4, 'prior_sigma': 4, 'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
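The forward pass above samples weights with the reparameterisation trick, weight = weight_mu + exp(weight_log_sigma) * eps with eps ~ N(0, I). A tiny standalone illustration (the values here are illustrative, not from the record):

# Sketch of the reparameterisation trick used by BayesConv2d.forward.
import torch

mu = torch.zeros(3)                            # posterior mean
log_sigma = torch.log(torch.full((3,), 0.1))   # posterior log-std
eps = torch.randn(3)                           # fresh noise per forward pass
weight = mu + torch.exp(log_sigma) * eps       # one posterior weight sample
print(weight)                                  # draws from N(0, 0.1**2)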
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math from torch.nn import Parameter import torch.nn.functional as F from torch.nn.modules.utils import _pair assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_convolution_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_convolution_exp_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch. 
float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf3 = torch.ops.aten.randn.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_convolution_exp_mul_1[grid(4)](primals_3, primals_4, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf6 = extern_kernels.convolution(primals_5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 1, 1), (4, 1, 1, 1)) buf7 = buf6 del buf6 triton_poi_fused_add_convolution_exp_mul_2[grid(16)](buf7, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf7, primals_2, primals_4, primals_5, buf1, buf2, buf4 class _BayesConvNd(Module): """ Applies Bayesian Convolution Arguments: prior_mu (Float): mean of prior normal distribution. prior_sigma (Float): sigma of prior normal distribution. .. note:: other arguments are following conv of pytorch 1.2.0. https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py """ __constants__ = ['prior_mu', 'prior_sigma', 'stride', 'padding', 'dilation', 'groups', 'bias', 'padding_mode', 'output_padding', 'in_channels', 'out_channels', 'kernel_size'] def __init__(self, prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias, padding_mode): super(_BayesConvNd, self).__init__() if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.transposed = transposed self.output_padding = output_padding self.groups = groups self.padding_mode = padding_mode self.prior_mu = prior_mu self.prior_sigma = prior_sigma self.prior_log_sigma = math.log(prior_sigma) if transposed: self.weight_mu = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size)) self.weight_log_sigma = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size)) self.register_buffer('weight_eps', None) else: self.weight_mu = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size)) self.weight_log_sigma = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size)) self.register_buffer('weight_eps', None) if bias is None or bias is False: self.bias = False else: self.bias = True if self.bias: self.bias_mu = Parameter(torch.Tensor(out_channels)) self.bias_log_sigma = Parameter(torch.Tensor(out_channels)) self.register_buffer('bias_eps', None) else: self.register_parameter('bias_mu', None) self.register_parameter('bias_log_sigma', None) self.register_buffer('bias_eps', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels n *= self.kernel_size[0] ** 2 stdv = 1.0 / math.sqrt(n) self.weight_mu.data.uniform_(-stdv, stdv) self.weight_log_sigma.data.fill_(self.prior_log_sigma) if self.bias: self.bias_mu.data.uniform_(-stdv, stdv) self.bias_log_sigma.data.fill_(self.prior_log_sigma) def freeze(self): self.weight_eps = 
torch.randn_like(self.weight_log_sigma) if self.bias: self.bias_eps = torch.randn_like(self.bias_log_sigma) def unfreeze(self): self.weight_eps = None if self.bias: self.bias_eps = None def extra_repr(self): s = ( '{prior_mu}, {prior_sigma}, {in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}' ) if self.padding != (0,) * len(self.padding): s += ', padding={padding}' if self.dilation != (1,) * len(self.dilation): s += ', dilation={dilation}' if self.output_padding != (0,) * len(self.output_padding): s += ', output_padding={output_padding}' if self.groups != 1: s += ', groups={groups}' if self.bias is False: s += ', bias=False' return s.format(**self.__dict__) def __setstate__(self, state): super(_BayesConvNd, self).__setstate__(state) if not hasattr(self, 'padding_mode'): self.padding_mode = 'zeros' class BayesConv2dNew(_BayesConvNd): """ Applies Bayesian Convolution for 2D inputs Arguments: prior_mu (Float): mean of prior normal distribution. prior_sigma (Float): sigma of prior normal distribution. .. note:: other arguments are following conv of pytorch 1.2.0. https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py """ def __init__(self, prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(BayesConv2dNew, self).__init__(prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode) def conv2d_forward(self, input, weight): if self.bias: if self.bias_eps is None: bias = self.bias_mu + torch.exp(self.bias_log_sigma ) * torch.randn_like(self.bias_log_sigma) else: bias = self.bias_mu + torch.exp(self.bias_log_sigma ) * self.bias_eps else: bias = None if self.padding_mode == 'circular': expanded_padding = (self.padding[1] + 1) // 2, self.padding[1 ] // 2, (self.padding[0] + 1) // 2, self.padding[0] // 2 return F.conv2d(F.pad(input, expanded_padding, mode='circular'), weight, bias, self.stride, _pair(0), self.dilation, self.groups ) return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) def forward(self, input_0): primals_1 = self.weight_mu primals_2 = self.weight_log_sigma primals_3 = self.bias_mu primals_4 = self.bias_log_sigma primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Harry24k/bayesian-neural-network-pytorch
BayesConv2d
false
13769
[ "MIT" ]
178
d2272f09e0d08c1abe1f53ce6df56b31494d7020
https://github.com/Harry24k/bayesian-neural-network-pytorch/tree/d2272f09e0d08c1abe1f53ce6df56b31494d7020
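A sketch of the sampling behaviour (assumes the BayesConv2d class in this record is in scope; the prior values here are illustrative):

import torch

layer = BayesConv2d(prior_mu=0.0, prior_sigma=0.1, in_channels=4,
                    out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y1, y2 = layer(x), layer(x)
print(torch.allclose(y1, y2))   # False: eps is re-drawn on every call
layer.freeze()                  # fix one eps draw for weights and bias
y3, y4 = layer(x), layer(x)
print(torch.allclose(y3, y4))   # True: the layer is now deterministic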
ChannelPool
import torch import torch.nn as nn import torch.utils.model_zoo class ChannelPool(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1) .unsqueeze(1)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = 4.0 tmp25 = tmp23 / tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp14, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp13, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ChannelPoolNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
HolmesShuan/OISR-PyTorch
ChannelPool
false
13770
[ "BSD-2-Clause" ]
141
bbe0c88f71fe565a2842df7971b62a9bc5a56c48
https://github.com/HolmesShuan/OISR-PyTorch/tree/bbe0c88f71fe565a2842df7971b62a9bc5a56c48
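ChannelPool collapses the channel axis into two descriptor maps (per-pixel max and mean), the usual front end of a CBAM-style spatial-attention branch. A sketch, assuming the class in this record is in scope:

import torch

pool = ChannelPool()
x = torch.rand(4, 4, 4, 4)
out = pool(x)
print(out.shape)                                      # torch.Size([4, 2, 4, 4])
assert torch.equal(out[:, 0], x.max(dim=1).values)    # channel-wise max map
assert torch.equal(out[:, 1], x.mean(dim=1))          # channel-wise mean map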
AttentionPool2d
import torch import torch.nn.functional as F from torch import nn class AttentionPool2d(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute( 2, 0, 1) x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) x = x + self.positional_embedding[:, None, :] x, _ = F.multi_head_attention_forward(query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj. weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn= False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False) return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 17, tl.int64) tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = 4 + y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 272 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 68 x3 = xindex // 68 tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = float('-inf') tmp12 = tmp0 == tmp11 tmp13 = tmp12 == 0 tmp14 = tmp13.to(tl.int64) tmp15 = tmp14 != 0 
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = triton_helpers.any(tmp18, 1)[:, None] tmp20 = tmp19 == 0 tmp21 = tmp6 / tmp10 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp22, tmp21) tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask) @triton.jit def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4624 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 289 x1 = xindex // 289 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 17 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (17, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2, buf1, 272, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((12,), (1,), torch.float32) triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta =1, out=buf5) del buf4 buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32) buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32) triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0) del buf2 buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32) triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6, primals_7, 
primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) del buf3 del primals_6 del primals_7 del primals_8 buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8) buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32) triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17, XBLOCK=1, num_warps=2, num_stages=1) buf13 = buf8 del buf8 triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0) del buf7 extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14) del buf13 buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK =16, YBLOCK=32, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0) del buf14 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor( buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3 class AttentionPool2dNew(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, input_0): primals_2 = self.positional_embedding primals_3 = self.k_proj.weight primals_6 = self.k_proj.bias primals_4 = self.q_proj.weight primals_7 = self.q_proj.bias primals_5 = self.v_proj.weight primals_8 = self.v_proj.bias primals_9 = self.c_proj.weight primals_10 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
HIT-SCIR-xuanxuan/OpenKS
AttentionPool2d
false
13771
[ "Apache-2.0" ]
88
a7f2ce0890822113322aad22e98d6c961e63caef
https://github.com/HIT-SCIR-xuanxuan/OpenKS/tree/a7f2ce0890822113322aad22e98d6c961e63caef
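AttentionPool2d pools an (N, C, H, W) feature map into one embedding per image by attending from a prepended mean token (CLIP-style). A sketch, assuming the class in this record is in scope; spacial_dim must equal the input's H (= W):

import torch

pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads=4)
x = torch.rand(4, 4, 4, 4)      # (batch, embed_dim, 4, 4)
out = pool(x)
print(out.shape)                # torch.Size([4, 4]): one vector per image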