Dataset schema (column: type, observed range):
entry_point: string, length 1–65
original_triton_python_code: string, length 208–619k
optimised_triton_code: string, length 1.15k–275k
repo_name: string, length 7–115
module_name: string, length 1–65
synthetic: bool, 1 class
uuid: int64, 0–18.5k
licenses: list, length 1–6
stars: int64, 0–19.8k
sha: string, length 40
repo_link: string, length 72–180
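Each row below pairs a PyTorch module (original_triton_python_code) with the Triton code that torch.compile's Inductor backend generated for it (optimised_triton_code), plus repository provenance. As a minimal loading sketch with the Hugging Face datasets library, assuming a placeholder dataset id (your-org/pytorch-to-triton is hypothetical, not the real id):

from datasets import load_dataset

# Hypothetical dataset id; substitute the real one.
ds = load_dataset("your-org/pytorch-to-triton", split="train")
row = ds[0]
print(row["entry_point"])                  # e.g. "MaxElementwise"
print(row["repo_name"], row["sha"])        # provenance of the source module
print(row["optimised_triton_code"][:200])  # Inductor-generated Triton source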
MaxElementwise
import torch class MaxElementwise(torch.nn.Module): def forward(self, x, y): return torch.max(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_maximum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_maximum_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class MaxElementwiseNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
MaxElementwise
false
1,588
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
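The row above is self-checking in principle: the original field defines MaxElementwise and get_inputs(), while the optimised field defines MaxElementwiseNew, a wrapper around the generated call(). A minimal parity-check sketch, assuming both source strings have already been exec()'d into the current namespace and a CUDA device is available:

import torch

# Move the sample inputs to the GPU; the generated call() asserts
# CUDA tensors with shape (4, 4, 4, 4) and contiguous strides.
x, y = (t.cuda() for t in get_inputs())
ref = MaxElementwise()(x, y)          # eager reference
opt = MaxElementwiseNew()(x, y)       # generated Triton path
torch.testing.assert_close(ref, opt)  # both compute the elementwise max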
MultiHeadAttention
import math import torch from torch import nn class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax() def forward(self, q, k, v, mask=None, e=1e-12): batch_size, head, length, d_tensor = k.size() k_t = k.view(batch_size, head, d_tensor, length) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = self.split(q), self.split(k), self.split(v) out, _attention = self.attention(q, k, v, mask=mask) out = self.concat(out) out = self.w_concat(out) return out def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, self.n_head, length, d_tensor) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.view(batch_size, length, d_model) return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'n_head': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf6) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_11 return reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf6, (16, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0) class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax() def forward(self, q, k, v, mask=None, e=1e-12): batch_size, head, length, d_tensor = k.size() k_t = k.view(batch_size, head, d_tensor, length) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttentionNew, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, self.n_head, length, d_tensor) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.view(batch_size, length, d_model) return tensor def forward(self, input_0, input_1, input_2): primals_1 = self.w_q.weight primals_2 = self.w_q.bias primals_4 = self.w_k.weight primals_5 = self.w_k.bias primals_7 = self.w_v.weight primals_8 = self.w_v.bias primals_10 = self.w_concat.weight primals_11 = self.w_concat.bias primals_3 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
bsgiovanini/transformer
MultiHeadAttention
false
1,589
[ "Apache-2.0" ]
0
e128fa862f1b3d17d7b92df169a2bbee3f08366f
https://github.com/bsgiovanini/transformer/tree/e128fa862f1b3d17d7b92df169a2bbee3f08366f
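A note on the two fused kernels in the row above: together they implement a numerically stable softmax along dim 1 of the (4, 4, 4, 4) score tensor (the implicit dimension picked by nn.Softmax() with no dim argument). triton_poi_fused__softmax_0 computes e_i = exp(x_i - max_j x_j) over the four entries of each slice, and triton_poi_fused__softmax_1 normalizes, giving softmax(x)_i = e_i / (e_1 + e_2 + e_3 + e_4); the matmuls themselves are dispatched to extern_kernels.addmm and bmm rather than fused.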
WaveNetLayer
import torch import typing as T class WaveNetLayer(torch.nn.Module): """a single gated residual wavenet layer""" def __init__(self, channels: 'int', kernel_size: 'int', dilation: 'int'): super().__init__() self._conv = torch.nn.Conv1d(in_channels=channels, out_channels= channels, kernel_size=kernel_size, padding='same', dilation= dilation) self._conv_skip = torch.nn.Conv1d(in_channels=channels // 2, out_channels=channels, kernel_size=1) self._conv_out = torch.nn.Conv1d(in_channels=channels // 2, out_channels=channels, kernel_size=1) def forward(self, x: 'torch.Tensor') ->T.Tuple[torch.Tensor, torch.Tensor]: r = x x = self._conv(x) x, g = x.split(x.size(1) // 2, dim=1) x = torch.tanh(x) * torch.sigmoid(g) s = self._conv_skip(x) x = self._conv_out(x) x = (x + r) * torch.tensor(0.5).sqrt() return x, s def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'kernel_size': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 8 x4 = xindex % 8 x1 = xindex // 4 % 2 x3 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 16 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x4 + 16 * x2), xmask) tmp5 = tl.load(in_ptr1 + (2 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp8 = tmp3 * tmp7 tl.store(out_ptr0 + x3, tmp3, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) tl.store(out_ptr2 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_sqrt_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.7071067690849304 tmp6 = tmp4 * tmp5 tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 2, 1), (2, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 2, 1), (2, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(80)](primals_1, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_1[grid(32)](buf1, primals_3, buf2, buf3, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf1 del primals_3 buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4), (16, 4, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(64)](buf6, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf7 = extern_kernels.convolution(buf4, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4), (16, 4, 1)) buf8 = buf7 del buf7 triton_poi_fused_add_convolution_mul_sqrt_3[grid(64)](buf8, primals_7, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_7 return buf8, buf6, primals_2, primals_4, primals_6, buf0, buf2, buf3, buf4 class WaveNetLayerNew(torch.nn.Module): """a single gated residual wavenet layer""" def __init__(self, channels: 'int', kernel_size: 'int', dilation: 'int'): super().__init__() self._conv = torch.nn.Conv1d(in_channels=channels, out_channels= channels, kernel_size=kernel_size, padding='same', dilation= dilation) self._conv_skip = torch.nn.Conv1d(in_channels=channels // 2, out_channels=channels, kernel_size=1) self._conv_out = torch.nn.Conv1d(in_channels=channels // 2, out_channels=channels, kernel_size=1) def forward(self, input_0): primals_1 = self._conv.weight primals_3 = self._conv.bias primals_4 = self._conv_skip.weight primals_5 = self._conv_skip.bias primals_6 = self._conv_out.weight primals_7 = self._conv_out.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
brentspell/hifi-gan-bwe
WaveNetLayer
false
1,590
[ "MIT" ]
0
63579ac8055c63fc0e5a20ae90e2a86575fc8e12
https://github.com/brentspell/hifi-gan-bwe/tree/63579ac8055c63fc0e5a20ae90e2a86575fc8e12
IDiv
import torch class IDiv(torch.nn.Module): def __init__(self): super(IDiv, self).__init__() def forward(self, x, y): x /= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 / tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class IDivNew(torch.nn.Module): def __init__(self): super(IDivNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
IDiv
false
1,591
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
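Note the in-place lowering in the IDiv row above: call() passes arg0_1 as both the input pointer and the output pointer of triton_poi_fused_div_0 and returns it, so the kernel writes the quotient back into the first argument, matching the mutating semantics of x /= y. The ISub, IAdd, and IMul rows below follow the same pattern, while the out-of-place ops (Mul, Sub, TorchDiv, and the rest) allocate a fresh output buffer with empty_strided_cuda.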
Mul
import torch class Mul(torch.nn.Module): def __init__(self): super(Mul, self).__init__() def forward(self, x, y): return x * y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class MulNew(torch.nn.Module): def __init__(self): super(MulNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
Mul
false
1,592
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
EQ
import torch class EQ(torch.nn.Module): def __init__(self): super(EQ, self).__init__() def forward(self, x, y): return x == y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class EQNew(torch.nn.Module): def __init__(self): super(EQNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
EQ
false
1,593
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
GT
import torch class GT(torch.nn.Module): def __init__(self): super(GT, self).__init__() def forward(self, x, y): return x > y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_gt_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class GTNew(torch.nn.Module): def __init__(self): super(GTNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
GT
false
1,594
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RAddFloat
import torch class RAddFloat(torch.nn.Module): def __init__(self): super(RAddFloat, self).__init__() def forward(self, x): return 1.0 + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RAddFloatNew(torch.nn.Module): def __init__(self): super(RAddFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RAddFloat
false
1,595
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
ISub
import torch class ISub(torch.nn.Module): def __init__(self): super(ISub, self).__init__() def forward(self, x, y): x -= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class ISubNew(torch.nn.Module): def __init__(self): super(ISubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
ISub
false
1,596
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
IAdd
import torch class IAdd(torch.nn.Module): def __init__(self): super(IAdd, self).__init__() def forward(self, x, y): x += y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class IAddNew(torch.nn.Module): def __init__(self): super(IAddNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
IAdd
false
1,597
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
MinElementwise
import torch class MinElementwise(torch.nn.Module): def forward(self, x, y): return torch.min(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_minimum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.minimum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_minimum_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class MinElementwiseNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
MinElementwise
false
1,598
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
IMul
import torch class IMul(torch.nn.Module): def __init__(self): super(IMul, self).__init__() def forward(self, x, y): x *= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class IMulNew(torch.nn.Module): def __init__(self): super(IMulNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
IMul
false
1,599
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
LT
import torch class LT(torch.nn.Module): def __init__(self): super(LT, self).__init__() def forward(self, x, y): return x < y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_lt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 < tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_lt_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class LTNew(torch.nn.Module): def __init__(self): super(LTNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
LT
false
1,600
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
Pow
import torch class Pow(torch.nn.Module): def __init__(self): super(Pow, self).__init__() def forward(self, x, y): return x ** y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.pow(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class PowNew(torch.nn.Module): def __init__(self): super(PowNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
Pow
false
1,601
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RDivFloat
import torch class RDivFloat(torch.nn.Module): def __init__(self): super(RDivFloat, self).__init__() def forward(self, x): return 100.0 / x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RDivFloatNew(torch.nn.Module): def __init__(self): super(RDivFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RDivFloat
false
1,602
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
TensorClamp
import torch class TensorClamp(torch.nn.Module): def forward(self, x): return x.clamp(-0.1, 0.1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -0.1 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 0.1 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorClampNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
TensorClamp
false
1,603
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RSubInt
import torch class RSubInt(torch.nn.Module): def __init__(self): super(RSubInt, self).__init__() def forward(self, x): return 1 - x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RSubIntNew(torch.nn.Module): def __init__(self): super(RSubIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RSubInt
false
1,604
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RMulInt
import torch class RMulInt(torch.nn.Module): def __init__(self): super(RMulInt, self).__init__() def forward(self, x): return 10 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 10.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RMulIntNew(torch.nn.Module): def __init__(self): super(RMulIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RMulInt
false
1,605
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RMulFloat
import torch class RMulFloat(torch.nn.Module): def __init__(self): super(RMulFloat, self).__init__() def forward(self, x): return 10.0 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 10.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RMulFloatNew(torch.nn.Module): def __init__(self): super(RMulFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RMulFloat
false
1,606
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
TensorClampOptionMax
import torch class TensorClampOptionMax(torch.nn.Module): def forward(self, x): return x.clamp(max=0.1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1 tmp2 = triton_helpers.minimum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorClampOptionMaxNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
TensorClampOptionMax
false
1,607
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RDivInt
import torch class RDivInt(torch.nn.Module): def __init__(self): super(RDivInt, self).__init__() def forward(self, x): return 100 / x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RDivIntNew(torch.nn.Module): def __init__(self): super(RDivIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RDivInt
false
1,608
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
TorchSub
import torch class TorchSub(torch.nn.Module): def __init__(self): super(TorchSub, self).__init__() def forward(self, x, y): return torch.sub(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchSubNew(torch.nn.Module): def __init__(self): super(TorchSubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
TorchSub
false
1,609
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
Sub
import torch class Sub(torch.nn.Module): def __init__(self): super(Sub, self).__init__() def forward(self, x, y): return x - y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SubNew(torch.nn.Module): def __init__(self): super(SubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
Sub
false
1,610
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
TorchClampMin
import torch class TorchClampMin(torch.nn.Module): def forward(self, x): return torch.clamp_min(x, -0.1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -0.1 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class TorchClampMinNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
TorchClampMin
false
1,611
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
TorchDiv
import torch class TorchDiv(torch.nn.Module): def __init__(self): super(TorchDiv, self).__init__() def forward(self, x, y): return torch.div(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchDivNew(torch.nn.Module): def __init__(self): super(TorchDivNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
TorchDiv
false
1,612
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
RSubFloat
import torch class RSubFloat(torch.nn.Module): def __init__(self): super(RSubFloat, self).__init__() def forward(self, x): return 1.0 - x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RSubFloatNew(torch.nn.Module): def __init__(self): super(RSubFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RSubFloat
false
1,613
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
TensorClampMax
import torch class TensorClampMax(torch.nn.Module): def forward(self, x): return x.clamp_max(0.1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1 tmp2 = triton_helpers.minimum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_max_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorClampMaxNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
TensorClampMax
false
1,614
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
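The same pattern for TensorClampMax, again a sketch assuming a CUDA device and TensorClampMaxNew in scope; triton_helpers.minimum(x, 0.1) is exactly the eager clamp_max.

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
out = TensorClampMaxNew()(x)
assert torch.allclose(out, x.clamp_max(0.1))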
RpowFloat
import torch class RpowFloat(torch.nn.Module): def __init__(self): super(RpowFloat, self).__init__() def forward(self, x): return 2.0 ** x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.exp2(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RpowFloatNew(torch.nn.Module): def __init__(self): super(RpowFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bunderhi/torch2trt
RpowFloat
false
1,615
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
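For RpowFloat the fused kernel rewrites 2.0 ** x as libdevice.exp2(x); a sketch (CUDA device and RpowFloatNew in scope assumed) that checks the rewrite against the eager power to float32 tolerance.

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
out = RpowFloatNew()(x)
# exp2 and pow(2, x) may differ in the last ulp, hence the tolerance
assert torch.allclose(out, 2.0 ** x, atol=1e-6)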
TorchPow
import torch class TorchPow(torch.nn.Module): def __init__(self): super(TorchPow, self).__init__() def forward(self, x, y): return torch.pow(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.pow(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchPowNew(torch.nn.Module): def __init__(self): super(TorchPowNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
TorchPow
false
1,616
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
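TorchPow follows the two-operand pattern of TorchDiv above; as there, this sketch (CUDA device, TorchPowNew in scope) prints the deviation from the eager torch.pow(x, y) instead of asserting, leaving the generated call()'s operand order as given.

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
y = torch.rand([4, 4, 4, 4], device='cuda')
out = TorchPowNew()(x, y)
ref = torch.pow(x, y)  # eager reference from the original module
print(out.shape, (out - ref).abs().max().item())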
TransformerEncoderLayerWithConv1d
import torch import torch.nn as nn import torch.nn.functional as F class TransformerEncoderLayerWithConv1d(nn.Module): """ Input and output shape: seqlen x batch_size x dim """ def __init__(self, dim_model, nheads, dim_feedforward, dropout, kernel_size, stride): super(TransformerEncoderLayerWithConv1d, self).__init__() self.encoder_layer = nn.TransformerEncoderLayer(dim_model, nheads, dim_feedforward, dropout) self.conv1d = nn.Conv1d(dim_model, dim_model, kernel_size, stride= stride, padding=1) def forward(self, src, src_mask=None, src_key_padding_mask=None): output = self.encoder_layer(src, src_mask, src_key_padding_mask) output = F.relu(self.conv1d(output.permute(1, 2, 0))) return output.permute(2, 0, 1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim_model': 4, 'nheads': 4, 'dim_feedforward': 4, 'dropout': 0.5, 'kernel_size': 4, 'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_transpose_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * y1 + 48 * x2), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_mul_transpose_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * y1 + 48 * x2), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp4, xmask & ymask) @triton.jit def triton_poi_fused__safe_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__safe_softmax_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_bmm_transpose_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (128 + x0 + 4 * (x0 % 4 // 4) + 16 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) tl.store(out_ptr1 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 
= tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_13(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, 
(4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf27 = empty_strided_cuda((16, 1, 4), (1, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_mul_transpose_0[grid(16, 4)](buf0, primals_2, buf1, buf27, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) buf28 = empty_strided_cuda((16, 4, 1), (1, 16, 1), torch.float32) triton_poi_fused_mul_transpose_1[grid(16, 4)](buf0, primals_2, buf2, buf28, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK =256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_3[grid(256)](buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 buf6 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(192)](buf0, primals_2, buf6, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 buf7 = reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 64), 0) del buf2 buf26 = reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0) del buf1 triton_poi_fused_bmm_transpose_5[grid(64)](buf6, buf7, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf6 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), buf7, out=buf8) buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_6[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.addmm(primals_5, reinterpret_tensor(buf9, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_5 buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf10, buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1, buf10, buf11, buf12, primals_6, primals_7, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_9[grid(64)](buf15, primals_9, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0) del buf16 triton_poi_fused_add_10[grid(64)](buf17, buf13, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf18 = buf12 del buf12 buf19 = buf11 del buf11 triton_poi_fused_native_layer_norm_11[grid(16)](buf17, buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf17, buf18, buf19, primals_12, primals_13, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf18 del buf19 del primals_13 buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_13[grid(16, 4)](buf20, buf21, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_14, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf22, (4, 4, 3), (12, 3, 1)) del buf21 buf23 = buf22 del buf22 buf24 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_14[grid(48)](buf23 , primals_15, buf24, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 return reinterpret_tensor(buf23, (3, 4, 4), (1, 12, 3), 0 ), primals_1, primals_6, primals_12, primals_14, buf5, reinterpret_tensor( buf9, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf17, reinterpret_tensor(buf20, (4, 4, 4), (4, 1, 16), 0 ), buf24, primals_10, buf25, primals_8, primals_4, buf26, buf27, buf28 class TransformerEncoderLayerWithConv1dNew(nn.Module): """ Input and output shape: seqlen x batch_size x dim """ def __init__(self, dim_model, nheads, dim_feedforward, dropout, kernel_size, stride): super(TransformerEncoderLayerWithConv1dNew, self).__init__() self.encoder_layer = nn.TransformerEncoderLayer(dim_model, nheads, dim_feedforward, dropout) self.conv1d = nn.Conv1d(dim_model, dim_model, kernel_size, stride= stride, padding=1) def forward(self, input_0): primals_3 = self.encoder_layer.self_attn.in_proj_weight primals_2 = self.encoder_layer.self_attn.in_proj_bias primals_4 = self.encoder_layer.self_attn.out_proj.weight primals_5 = self.encoder_layer.self_attn.out_proj.bias primals_8 = self.encoder_layer.linear1.weight primals_6 = self.encoder_layer.linear1.bias primals_10 = self.encoder_layer.linear2.weight primals_7 = self.encoder_layer.linear2.bias primals_9 = self.encoder_layer.norm1.weight primals_11 = self.encoder_layer.norm1.bias primals_12 = self.encoder_layer.norm2.weight primals_13 = self.encoder_layer.norm2.bias primals_1 = self.conv1d.weight primals_15 = self.conv1d.bias primals_14 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
bliunlpr/pykaldi2
TransformerEncoderLayerWithConv1d
false
1,617
[ "MIT" ]
0
f6020b5dd9900f97ab69c97442a91196a03dd93b
https://github.com/bliunlpr/pykaldi2/tree/f6020b5dd9900f97ab69c97442a91196a03dd93b
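A shape-level smoke test for the fused encoder-plus-conv record, a sketch assuming a CUDA device and TransformerEncoderLayerWithConv1dNew in scope. Unlike the elementwise wrappers, this one carries real parameters, so the module must be moved to the GPU before the fused call; dropout ops are absent from the generated graph (it reflects inference), so only shapes are checked. With kernel_size=4, stride=1 and padding 1 on a length-4 sequence the conv yields floor((4 + 2 - 4) / 1) + 1 = 3 steps, matching the (3, 4, 4) tensor that call() returns.

import torch

mod = TransformerEncoderLayerWithConv1dNew(
    dim_model=4, nheads=4, dim_feedforward=4,
    dropout=0.5, kernel_size=4, stride=1,
).cuda()  # parameters must live on the GPU for the Triton kernels
src = torch.rand(4, 4, 4, device='cuda')  # seqlen x batch_size x dim, per get_inputs
out = mod(src)
assert out.shape == (3, 4, 4)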
EncoderLayer
import math import torch from torch import nn class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) out = (x - mean) / (std + self.eps) out = self.gamma * out + self.beta return out class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax() def forward(self, q, k, v, mask=None, e=1e-12): batch_size, head, length, d_tensor = k.size() k_t = k.view(batch_size, head, d_tensor, length) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = self.split(q), self.split(k), self.split(v) out, _attention = self.attention(q, k, v, mask=mask) out = self.concat(out) out = self.w_concat(out) return out def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, self.n_head, length, d_tensor) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.view(batch_size, length, d_model) return tensor class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, hidden, drop_prob=0.1): super(PositionwiseFeedForward, self).__init__() self.linear1 = nn.Linear(d_model, hidden) self.linear2 = nn.Linear(hidden, d_model) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=drop_prob) def forward(self, x): x = self.linear1(x) x = self.relu(x) x = self.dropout(x) x = self.linear2(x) return x class EncoderLayer(nn.Module): def __init__(self, d_model, ffn_hidden, n_head, drop_prob): super(EncoderLayer, self).__init__() self.attention = MultiHeadAttention(d_model=d_model, n_head=n_head) self.norm1 = LayerNorm(d_model=d_model) self.dropout1 = nn.Dropout(p=drop_prob) self.ffn = PositionwiseFeedForward(d_model=d_model, hidden= ffn_hidden, drop_prob=drop_prob) self.norm2 = LayerNorm(d_model=d_model) self.dropout2 = nn.Dropout(p=drop_prob) def forward(self, x, s_mask): _x = x x = self.attention(q=x, k=x, v=x, mask=s_mask) x = self.norm1(x + _x) x = self.dropout1(x) _x = x x = self.ffn(x) x = self.norm2(x + _x) x = self.dropout2(x) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'ffn_hidden': 4, 'n_head': 4, 'drop_prob': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1) tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1) tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1) tmp17 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -9.999999960041972e-13 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -9.999999960041972e-13 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 
+ (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = libdevice.sqrt(tmp6) tmp8 = 1e-12 tmp9 = tmp7 + tmp8 tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = 
tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-12 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_8, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf4, buf3, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf7, buf4, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0) del buf6 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf8) buf9 = reinterpret_tensor(buf5, 
(16, 4), (4, 1), 0) del buf5 extern_kernels.addmm(primals_10, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_10 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_std_3[grid(16)](buf11, buf9, primals_1, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_11, buf9, primals_1, buf12, buf11, primals_12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf11 del buf12 del primals_12 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf15, primals_14, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0) del buf16 triton_poi_fused_add_6[grid(64)](buf17, primals_16, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_17, buf17, primals_18, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 return (buf18, primals_1, primals_11, primals_17, buf4, buf7, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor( buf15, (16, 4), (4, 1), 0), buf17, primals_15, buf19, primals_13, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0)) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) out = (x - mean) / (std + self.eps) out = self.gamma * out + self.beta return out class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax() def forward(self, q, k, v, mask=None, e=1e-12): batch_size, head, length, d_tensor = k.size() k_t = k.view(batch_size, head, d_tensor, length) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def 
forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = self.split(q), self.split(k), self.split(v) out, _attention = self.attention(q, k, v, mask=mask) out = self.concat(out) out = self.w_concat(out) return out def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, self.n_head, length, d_tensor) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.view(batch_size, length, d_model) return tensor class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, hidden, drop_prob=0.1): super(PositionwiseFeedForward, self).__init__() self.linear1 = nn.Linear(d_model, hidden) self.linear2 = nn.Linear(hidden, d_model) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=drop_prob) def forward(self, x): x = self.linear1(x) x = self.relu(x) x = self.dropout(x) x = self.linear2(x) return x class EncoderLayerNew(nn.Module): def __init__(self, d_model, ffn_hidden, n_head, drop_prob): super(EncoderLayerNew, self).__init__() self.attention = MultiHeadAttention(d_model=d_model, n_head=n_head) self.norm1 = LayerNorm(d_model=d_model) self.dropout1 = nn.Dropout(p=drop_prob) self.ffn = PositionwiseFeedForward(d_model=d_model, hidden= ffn_hidden, drop_prob=drop_prob) self.norm2 = LayerNorm(d_model=d_model) self.dropout2 = nn.Dropout(p=drop_prob) def forward(self, input_0, input_1): primals_2 = self.attention.w_q.weight primals_3 = self.attention.w_q.bias primals_4 = self.attention.w_k.weight primals_5 = self.attention.w_k.bias primals_6 = self.attention.w_v.weight primals_7 = self.attention.w_v.bias primals_9 = self.attention.w_concat.weight primals_10 = self.attention.w_concat.bias primals_11 = self.norm1.gamma primals_12 = self.norm1.beta primals_13 = self.ffn.linear1.weight primals_14 = self.ffn.linear1.bias primals_15 = self.ffn.linear2.weight primals_16 = self.ffn.linear2.bias primals_17 = self.norm2.gamma primals_18 = self.norm2.beta primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
bsgiovanini/transformer
EncoderLayer
false
1,618
[ "Apache-2.0" ]
0
e128fa862f1b3d17d7b92df169a2bbee3f08366f
https://github.com/bsgiovanini/transformer/tree/e128fa862f1b3d17d7b92df169a2bbee3f08366f
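And a matching sketch for EncoderLayer (CUDA device and EncoderLayerNew in scope assumed). The fused graph folds the mask comparison (mask == 0) and the tiny -1e-12 fill into the softmax kernels and drops the dropout ops, so this only verifies that the fused path runs and preserves the [batch_size, length, d_model] shape.

import torch

mod = EncoderLayerNew(d_model=4, ffn_hidden=4, n_head=4, drop_prob=0.5).cuda()
x = torch.rand(4, 4, 4, device='cuda')          # [batch_size, length, d_model]
s_mask = torch.rand(4, 4, 4, 4, device='cuda')  # zero entries are masked out
out = mod(x, s_mask)
assert out.shape == (4, 4, 4)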
DecoderLayer
import math import torch from torch import nn class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) out = (x - mean) / (std + self.eps) out = self.gamma * out + self.beta return out class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax() def forward(self, q, k, v, mask=None, e=1e-12): batch_size, head, length, d_tensor = k.size() k_t = k.view(batch_size, head, d_tensor, length) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = self.split(q), self.split(k), self.split(v) out, _attention = self.attention(q, k, v, mask=mask) out = self.concat(out) out = self.w_concat(out) return out def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, self.n_head, length, d_tensor) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.view(batch_size, length, d_model) return tensor class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, hidden, drop_prob=0.1): super(PositionwiseFeedForward, self).__init__() self.linear1 = nn.Linear(d_model, hidden) self.linear2 = nn.Linear(hidden, d_model) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=drop_prob) def forward(self, x): x = self.linear1(x) x = self.relu(x) x = self.dropout(x) x = self.linear2(x) return x class DecoderLayer(nn.Module): def __init__(self, d_model, ffn_hidden, n_head, drop_prob): super(DecoderLayer, self).__init__() self.self_attention = MultiHeadAttention(d_model=d_model, n_head=n_head ) self.norm1 = LayerNorm(d_model=d_model) self.dropout1 = nn.Dropout(p=drop_prob) self.enc_dec_attention = MultiHeadAttention(d_model=d_model, n_head =n_head) self.norm2 = LayerNorm(d_model=d_model) self.dropout2 = nn.Dropout(p=drop_prob) self.ffn = PositionwiseFeedForward(d_model=d_model, hidden= ffn_hidden, drop_prob=drop_prob) self.norm3 = LayerNorm(d_model=d_model) self.dropout3 = nn.Dropout(p=drop_prob) def forward(self, dec, enc, t_mask, s_mask): _x = dec x = self.self_attention(q=dec, k=dec, v=dec, mask=t_mask) x = self.norm1(x + _x) x = self.dropout1(x) if enc is not None: _x = x x = self.enc_dec_attention(q=x, k=enc, v=enc, mask=s_mask) x = 
self.norm2(x + _x) x = self.dropout2(x) _x = x x = self.ffn(x) x = self.norm3(x + _x) x = self.dropout3(x) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'ffn_hidden': 4, 'n_head': 4, 'drop_prob': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1) tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1) tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1) tmp17 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -9.999999960041972e-13 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -9.999999960041972e-13 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 
+ (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = libdevice.sqrt(tmp6) tmp8 = 1e-12 tmp9 = tmp7 + tmp8 tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-12 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 
tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4,), (1,)) assert_size_stride(primals_25, (4, 4), (4, 1)) assert_size_stride(primals_26, (4,), (1,)) assert_size_stride(primals_27, (4, 4), (4, 1)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) assert_size_stride(primals_30, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_8, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) 
del primals_8 buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf4, buf3, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf7, buf4, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0) del buf6 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf8) buf9 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0) del buf5 extern_kernels.addmm(primals_10, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_10 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_std_3[grid(16)](buf11, buf9, primals_1, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_11, buf9, primals_1, buf12, buf11, primals_12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf11 del buf12 del primals_12 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_15 buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_16 del primals_17 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_19, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_18 del primals_19 buf17 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf15, (16, 1, 4), (4, 4, 1), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_eq_0[grid(256)](primals_20, buf18, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_20 buf19 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf18, buf17, buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf17 triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf21, buf18, buf19, buf20, 256, XBLOCK=256, num_warps=4, num_stages=1) buf22 = reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0) del buf20 extern_kernels.bmm(reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 1), 0), out=buf22) buf23 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0) del buf19 extern_kernels.mm(reinterpret_tensor(buf22, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf23) buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0) del buf23 triton_poi_fused_add_5[grid(64)](buf24, primals_22, buf13, 
64, XBLOCK=64, num_warps=1, num_stages=1) del primals_22 buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_6[grid(64)](primals_23, buf24, primals_24, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_24 buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0) del buf26 buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf27, primals_26, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_26 buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf27, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf28) buf29 = reinterpret_tensor(buf28, (4, 4, 4), (16, 4, 1), 0) del buf28 triton_poi_fused_add_5[grid(64)](buf29, primals_28, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_28 buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_6[grid(64)](primals_29, buf29, primals_30, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_30 return (buf30, primals_1, primals_11, primals_23, primals_29, buf4, buf7, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor( primals_13, (16, 4), (4, 1), 0), buf18, buf21, reinterpret_tensor( buf22, (16, 4), (4, 1), 0), buf24, reinterpret_tensor(buf25, (16, 4 ), (4, 1), 0), reinterpret_tensor(buf27, (16, 4), (4, 1), 0), buf29, primals_27, buf31, primals_25, primals_21, reinterpret_tensor(buf16, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 4), 0), primals_14, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0)) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) out = (x - mean) / (std + self.eps) out = self.gamma * out + self.beta return out class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax() def forward(self, q, k, v, mask=None, e=1e-12): batch_size, head, length, d_tensor = k.size() k_t = k.view(batch_size, head, d_tensor, length) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = 
self.split(q), self.split(k), self.split(v) out, _attention = self.attention(q, k, v, mask=mask) out = self.concat(out) out = self.w_concat(out) return out def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, self.n_head, length, d_tensor) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.view(batch_size, length, d_model) return tensor class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, hidden, drop_prob=0.1): super(PositionwiseFeedForward, self).__init__() self.linear1 = nn.Linear(d_model, hidden) self.linear2 = nn.Linear(hidden, d_model) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=drop_prob) def forward(self, x): x = self.linear1(x) x = self.relu(x) x = self.dropout(x) x = self.linear2(x) return x class DecoderLayerNew(nn.Module): def __init__(self, d_model, ffn_hidden, n_head, drop_prob): super(DecoderLayerNew, self).__init__() self.self_attention = MultiHeadAttention(d_model=d_model, n_head=n_head ) self.norm1 = LayerNorm(d_model=d_model) self.dropout1 = nn.Dropout(p=drop_prob) self.enc_dec_attention = MultiHeadAttention(d_model=d_model, n_head =n_head) self.norm2 = LayerNorm(d_model=d_model) self.dropout2 = nn.Dropout(p=drop_prob) self.ffn = PositionwiseFeedForward(d_model=d_model, hidden= ffn_hidden, drop_prob=drop_prob) self.norm3 = LayerNorm(d_model=d_model) self.dropout3 = nn.Dropout(p=drop_prob) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.self_attention.w_q.weight primals_3 = self.self_attention.w_q.bias primals_4 = self.self_attention.w_k.weight primals_5 = self.self_attention.w_k.bias primals_6 = self.self_attention.w_v.weight primals_7 = self.self_attention.w_v.bias primals_9 = self.self_attention.w_concat.weight primals_10 = self.self_attention.w_concat.bias primals_11 = self.norm1.gamma primals_12 = self.norm1.beta primals_14 = self.enc_dec_attention.w_q.weight primals_15 = self.enc_dec_attention.w_q.bias primals_16 = self.enc_dec_attention.w_k.weight primals_17 = self.enc_dec_attention.w_k.bias primals_18 = self.enc_dec_attention.w_v.weight primals_19 = self.enc_dec_attention.w_v.bias primals_21 = self.enc_dec_attention.w_concat.weight primals_22 = self.enc_dec_attention.w_concat.bias primals_23 = self.norm2.gamma primals_24 = self.norm2.beta primals_25 = self.ffn.linear1.weight primals_26 = self.ffn.linear1.bias primals_27 = self.ffn.linear2.weight primals_28 = self.ffn.linear2.bias primals_29 = self.norm3.gamma primals_30 = self.norm3.beta primals_1 = input_0 primals_13 = input_1 primals_8 = input_2 primals_20 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return output[0]
bsgiovanini/transformer
DecoderLayer
false
1,619
[ "Apache-2.0" ]
0
e128fa862f1b3d17d7b92df169a2bbee3f08366f
https://github.com/bsgiovanini/transformer/tree/e128fa862f1b3d17d7b92df169a2bbee3f08366f
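A minimal smoke-test sketch for a row like the DecoderLayer pair above; it is not part of the dataset itself. It assumes a CUDA device, assumes the eager class, its *New counterpart, and the row's get_inputs/get_init_inputs helpers are in scope, and check_entry is a hypothetical helper name.

import torch

def check_entry(eager_cls, fused_cls, get_inputs, get_init_inputs):
    init_args, init_kwargs = get_init_inputs()
    eager = eager_cls(*init_args, **init_kwargs).cuda().eval()  # reference graph
    fused = fused_cls(*init_args, **init_kwargs).cuda().eval()  # Triton-backed graph
    fused.load_state_dict(eager.state_dict())  # run both with identical weights
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():  # eval() also matters: the fused graphs omit the dropout kernels
        torch.testing.assert_close(eager(*inputs), fused(*inputs),
            rtol=1e-4, atol=1e-4)

For parameter-free rows (such as the loss modules below) the state_dict step is a no-op.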
CrossEntropyWithLogSoftmax
import torch import torch.nn as nn class CrossEntropyWithLogSoftmax(nn.Module): def forward(self, y_hat, y): return -(y_hat * y).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_neg_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tmp8 = -tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_neg_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class CrossEntropyWithLogSoftmaxNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
cadurosar/graph_kd_dense_cifar100
CrossEntropyWithLogSoftmax
false
1,620
[ "MIT" ]
0
84054ab4f8f61c9db3460993661ba7bf1d951b36
https://github.com/cadurosar/graph_kd_dense_cifar100/tree/84054ab4f8f61c9db3460993661ba7bf1d951b36
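An illustrative use of the row above, not from the source repo, assuming the eager CrossEntropyWithLogSoftmax class is in scope: y_hat is expected to already hold log-probabilities, and with one-hot targets the expression reduces to a negative log-likelihood averaged over every element of the tensor rather than per sample.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)
y_hat = F.log_softmax(logits, dim=1)                    # log-probabilities
y = F.one_hot(torch.randint(0, 10, (4,)), 10).float()   # one-hot targets
loss = CrossEntropyWithLogSoftmax()(y_hat, y)           # == -(y_hat * y).mean()
per_sample_nll = -(y_hat * y).sum(dim=1).mean()         # cf. the BatchMean variant below
assert torch.allclose(loss * 10, per_sample_nll)        # mean() divides by 4 * 10, not 4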
KlLoss
import torch import torch.nn as nn def kl_div(p: 'torch.Tensor', q: 'torch.Tensor') ->torch.Tensor: x = p * torch.log(p / q) return x.abs().mean() class KlLoss(nn.Module): def __init__(self) ->None: super().__init__() def forward(self, inputs: 'torch.Tensor', targets: 'torch.Tensor'): loss_kl = kl_div(targets, inputs) return loss_kl def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_div_log_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 / tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = 256.0 tmp10 = tmp8 / tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_div_log_mean_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def kl_div(p: 'torch.Tensor', q: 'torch.Tensor') ->torch.Tensor: x = p * torch.log(p / q) return x.abs().mean() class KlLossNew(nn.Module): def __init__(self) ->None: super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
by-liu/RetinalApp
KlLoss
false
1,621
[ "MIT" ]
0
53173b2b20dfcf613a3a22d6caa5178771d14225
https://github.com/by-liu/RetinalApp/tree/53173b2b20dfcf613a3a22d6caa5178771d14225
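A usage sketch for the row above, assuming the eager kl_div/KlLoss definitions are in scope and feeding properly normalised distributions. Because kl_div takes abs() of each pointwise term p*log(p/q) and then averages over all elements, the result generally differs from torch.nn.functional.kl_div.

import torch
import torch.nn.functional as F

p = F.softmax(torch.randn(4, 8), dim=1)  # targets: rows sum to 1
q = F.softmax(torch.randn(4, 8), dim=1)  # inputs: rows sum to 1
loss = KlLoss()(q, p)                    # == (p * torch.log(p / q)).abs().mean()
kl_ref = F.kl_div(q.log(), p, reduction='batchmean')  # textbook KL(p || q)
print(float(loss), float(kl_ref))        # generally unequal: different reduction plus the abs()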
BatchMeanCrossEntropyWithLogSoftmax
import torch import torch.nn as nn class BatchMeanCrossEntropyWithLogSoftmax(nn.Module): def forward(self, y_hat, y): return -(y_hat * y).sum(dim=1).mean(dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr0 + (16 + x0), xmask) tmp4 = tl.load(in_ptr1 + (16 + x0), xmask) tmp7 = tl.load(in_ptr0 + (32 + x0), xmask) tmp8 = tl.load(in_ptr1 + (32 + x0), xmask) tmp11 = tl.load(in_ptr0 + (48 + x0), xmask) tmp12 = tl.load(in_ptr1 + (48 + x0), xmask) tmp15 = tl.load(in_ptr0 + (64 + x0), xmask) tmp16 = tl.load(in_ptr1 + (64 + x0), xmask) tmp18 = tl.load(in_ptr0 + (80 + x0), xmask) tmp19 = tl.load(in_ptr1 + (80 + x0), xmask) tmp22 = tl.load(in_ptr0 + (96 + x0), xmask) tmp23 = tl.load(in_ptr1 + (96 + x0), xmask) tmp26 = tl.load(in_ptr0 + (112 + x0), xmask) tmp27 = tl.load(in_ptr1 + (112 + x0), xmask) tmp31 = tl.load(in_ptr0 + (128 + x0), xmask) tmp32 = tl.load(in_ptr1 + (128 + x0), xmask) tmp34 = tl.load(in_ptr0 + (144 + x0), xmask) tmp35 = tl.load(in_ptr1 + (144 + x0), xmask) tmp38 = tl.load(in_ptr0 + (160 + x0), xmask) tmp39 = tl.load(in_ptr1 + (160 + x0), xmask) tmp42 = tl.load(in_ptr0 + (176 + x0), xmask) tmp43 = tl.load(in_ptr1 + (176 + x0), xmask) tmp47 = tl.load(in_ptr0 + (192 + x0), xmask) tmp48 = tl.load(in_ptr1 + (192 + x0), xmask) tmp50 = tl.load(in_ptr0 + (208 + x0), xmask) tmp51 = tl.load(in_ptr1 + (208 + x0), xmask) tmp54 = tl.load(in_ptr0 + (224 + x0), xmask) tmp55 = tl.load(in_ptr1 + (224 + x0), xmask) tmp58 = tl.load(in_ptr0 + (240 + x0), xmask) tmp59 = tl.load(in_ptr1 + (240 + x0), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = 4.0 tmp64 = tmp62 / tmp63 tmp65 = -tmp64 tl.store(in_out_ptr0 + x0, tmp65, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_mean_mul_neg_sum_0[grid(16)](buf1, arg0_1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class BatchMeanCrossEntropyWithLogSoftmaxNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
cadurosar/graph_kd_dense_cifar100
BatchMeanCrossEntropyWithLogSoftmax
false
1,622
[ "MIT" ]
0
84054ab4f8f61c9db3460993661ba7bf1d951b36
https://github.com/cadurosar/graph_kd_dense_cifar100/tree/84054ab4f8f61c9db3460993661ba7bf1d951b36
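A short numerical illustration, not from the source repo, of how this variant relates to the elementwise CrossEntropyWithLogSoftmax earlier in this file: summing over dim=1 before averaging over dim=0 scales the result by the size of the summed axis. A 2D (N, C) input is used here for clarity; the row's get_inputs uses 4D tensors, where dim=1 is the channel axis.

import torch
import torch.nn.functional as F

y_hat = F.log_softmax(torch.randn(4, 10), dim=1)
y = F.one_hot(torch.randint(0, 10, (4,)), 10).float()
batch_mean = -(y_hat * y).sum(dim=1).mean(dim=0)  # this row's reduction
elementwise = -(y_hat * y).mean()                 # the earlier variant
assert torch.allclose(batch_mean, elementwise * 10)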
GAT
import torch from torch import nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ https://github.com/Diego999/pyGAT/blob/master/layers.py Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, batch_num, node_num, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.batch_num = batch_num self.node_num = node_num torch.device('cuda') self.W = nn.Parameter(torch.empty(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, h, adj): """ :param h: (batch_size, number_nodes, in_features) :param adj: (batch_size, number_nodes, number_nodes) :return: (batch_size, number_nodes, out_features) """ Wh = torch.matmul(h, self.W) e = self.prepare_batch(Wh) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=-1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, Wh) if self.concat: return F.elu(h_prime) else: return h_prime def prepare_batch(self, Wh): """ with batch training :param Wh: (batch_size, number_nodes, out_features) :return: """ _B, _N, _E = Wh.shape Wh1 = torch.matmul(Wh, self.a[:self.out_features, :]) Wh2 = torch.matmul(Wh, self.a[self.out_features:, :]) e = Wh1 + Wh2.permute(0, 2, 1) return self.leakyrelu(e) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, in_feat_dim, nhid, out_feat_dim, dropout, alpha, nheads, batch_num, node_num): """Dense version of GAT.""" super(GAT, self).__init__() self.dropout = dropout self.batch_num = batch_num self.node_num = node_num torch.device('cuda') self.adj = nn.Parameter(torch.ones(self.batch_num, self.node_num, self.node_num)) self.attentions = [GraphAttentionLayer(in_feat_dim, nhid, dropout= dropout, alpha=alpha, batch_num=batch_num, node_num=node_num, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttentionLayer(nhid * nheads, out_feat_dim, dropout=dropout, alpha=alpha, batch_num=batch_num, node_num= node_num, concat=False) def forward(self, x): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, self.adj) for att in self.attentions], dim=-1) x = F.dropout(x, self.dropout, training=self.training) x = F.elu(self.out_att(x, self.adj)) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_feat_dim': 4, 'nhid': 4, 'out_feat_dim': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4, 'batch_num': 4, 'node_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tl.load(in_ptr3 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp11 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr3 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp19 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp20 = tl.load(in_ptr3 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp27 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp28 = tl.load(in_ptr3 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr4 + 4 * x2, xmask, eviction_policy='evict_last').to( tl.int1) tmp35 = tl.load(in_ptr5 + x2, xmask) tmp36 = tl.load(in_ptr6 + 4 * x1, xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr4 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp42 = tl.load(in_ptr6 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr4 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp49 = tl.load(in_ptr6 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp55 = tl.load(in_ptr4 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp56 = tl.load(in_ptr6 + (3 + 4 * 
x1), xmask, eviction_policy='evict_last' ) tmp62 = tl.load(in_ptr7 + 4 * x2, xmask, eviction_policy='evict_last').to( tl.int1) tmp63 = tl.load(in_ptr8 + x2, xmask) tmp64 = tl.load(in_ptr9 + 4 * x1, xmask, eviction_policy='evict_last') tmp69 = tl.load(in_ptr7 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp70 = tl.load(in_ptr9 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp76 = tl.load(in_ptr7 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp77 = tl.load(in_ptr9 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp83 = tl.load(in_ptr7 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp84 = tl.load(in_ptr9 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp90 = tl.load(in_ptr10 + 4 * x2, xmask, eviction_policy='evict_last').to( tl.int1) tmp91 = tl.load(in_ptr11 + x2, xmask) tmp92 = tl.load(in_ptr12 + 4 * x1, xmask, eviction_policy='evict_last') tmp97 = tl.load(in_ptr10 + (1 + 4 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp98 = tl.load(in_ptr12 + (1 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp104 = tl.load(in_ptr10 + (2 + 4 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp105 = tl.load(in_ptr12 + (2 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp111 = tl.load(in_ptr10 + (3 + 4 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp112 = tl.load(in_ptr12 + (3 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp13 = tmp2 + tmp12 tmp14 = tmp13 * tmp5 tmp15 = tl.where(tmp11, tmp13, tmp14) tmp16 = tl.where(tmp10, tmp15, tmp8) tmp17 = triton_helpers.maximum(tmp9, tmp16) tmp21 = tmp2 + tmp20 tmp22 = tmp21 * tmp5 tmp23 = tl.where(tmp19, tmp21, tmp22) tmp24 = tl.where(tmp18, tmp23, tmp8) tmp25 = triton_helpers.maximum(tmp17, tmp24) tmp29 = tmp2 + tmp28 tmp30 = tmp29 * tmp5 tmp31 = tl.where(tmp27, tmp29, tmp30) tmp32 = tl.where(tmp26, tmp31, tmp8) tmp33 = triton_helpers.maximum(tmp25, tmp32) tmp37 = tmp35 + tmp36 tmp38 = tmp37 * tmp5 tmp39 = tl.where(tmp34, tmp37, tmp38) tmp40 = tl.where(tmp0, tmp39, tmp8) tmp43 = tmp35 + tmp42 tmp44 = tmp43 * tmp5 tmp45 = tl.where(tmp41, tmp43, tmp44) tmp46 = tl.where(tmp10, tmp45, tmp8) tmp47 = triton_helpers.maximum(tmp40, tmp46) tmp50 = tmp35 + tmp49 tmp51 = tmp50 * tmp5 tmp52 = tl.where(tmp48, tmp50, tmp51) tmp53 = tl.where(tmp18, tmp52, tmp8) tmp54 = triton_helpers.maximum(tmp47, tmp53) tmp57 = tmp35 + tmp56 tmp58 = tmp57 * tmp5 tmp59 = tl.where(tmp55, tmp57, tmp58) tmp60 = tl.where(tmp26, tmp59, tmp8) tmp61 = triton_helpers.maximum(tmp54, tmp60) tmp65 = tmp63 + tmp64 tmp66 = tmp65 * tmp5 tmp67 = tl.where(tmp62, tmp65, tmp66) tmp68 = tl.where(tmp0, tmp67, tmp8) tmp71 = tmp63 + tmp70 tmp72 = tmp71 * tmp5 tmp73 = tl.where(tmp69, tmp71, tmp72) tmp74 = tl.where(tmp10, tmp73, tmp8) tmp75 = triton_helpers.maximum(tmp68, tmp74) tmp78 = tmp63 + tmp77 tmp79 = tmp78 * tmp5 tmp80 = tl.where(tmp76, tmp78, tmp79) tmp81 = tl.where(tmp18, tmp80, tmp8) tmp82 = triton_helpers.maximum(tmp75, tmp81) tmp85 = tmp63 + tmp84 tmp86 = tmp85 * tmp5 tmp87 = tl.where(tmp83, tmp85, tmp86) tmp88 = tl.where(tmp26, tmp87, tmp8) tmp89 = triton_helpers.maximum(tmp82, tmp88) tmp93 = tmp91 + tmp92 tmp94 = tmp93 * tmp5 tmp95 = tl.where(tmp90, tmp93, tmp94) tmp96 = tl.where(tmp0, tmp95, tmp8) tmp99 = tmp91 + tmp98 tmp100 = tmp99 * tmp5 tmp101 = tl.where(tmp97, tmp99, tmp100) tmp102 = tl.where(tmp10, tmp101, tmp8) tmp103 = 
triton_helpers.maximum(tmp96, tmp102) tmp106 = tmp91 + tmp105 tmp107 = tmp106 * tmp5 tmp108 = tl.where(tmp104, tmp106, tmp107) tmp109 = tl.where(tmp18, tmp108, tmp8) tmp110 = triton_helpers.maximum(tmp103, tmp109) tmp113 = tmp91 + tmp112 tmp114 = tmp113 * tmp5 tmp115 = tl.where(tmp111, tmp113, tmp114) tmp116 = tl.where(tmp26, tmp115, tmp8) tmp117 = triton_helpers.maximum(tmp110, tmp116) tl.store(out_ptr0 + x2, tmp33, xmask) tl.store(out_ptr1 + x2, tmp61, xmask) tl.store(out_ptr2 + x2, tmp89, xmask) tl.store(out_ptr3 + x2, tmp117, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x3, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x3, xmask).to(tl.int1) tmp14 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr7 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr8 + x4, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr9 + x3, xmask).to(tl.int1) tmp24 = tl.load(in_ptr10 + x4, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr11 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr12 + x4, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr13 + x3, xmask).to(tl.int1) tmp34 = tl.load(in_ptr14 + x4, xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr15 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr16 + x4, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tmp16 * tmp5 tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tl.where(tmp0, tmp18, tmp8) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp26 = tmp24 + tmp25 tmp27 = tmp26 * tmp5 tmp28 = tl.where(tmp23, tmp26, tmp27) tmp29 = tl.where(tmp0, tmp28, tmp8) tmp31 = tmp29 - tmp30 tmp32 = tl_math.exp(tmp31) tmp36 = tmp34 + tmp35 tmp37 = tmp36 * tmp5 tmp38 = tl.where(tmp33, tmp36, tmp37) tmp39 = tl.where(tmp0, tmp38, tmp8) tmp41 = tmp39 - tmp40 tmp42 = tl_math.exp(tmp41) tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp22, xmask) tl.store(out_ptr2 + x3, tmp32, xmask) tl.store(out_ptr3 + x3, tmp42, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 
tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x2, tmp52, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tl.load(in_ptr3 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp11 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr3 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp19 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp20 = tl.load(in_ptr3 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp27 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp28 = tl.load(in_ptr3 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = 
tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp13 = tmp2 + tmp12 tmp14 = tmp13 * tmp5 tmp15 = tl.where(tmp11, tmp13, tmp14) tmp16 = tl.where(tmp10, tmp15, tmp8) tmp17 = triton_helpers.maximum(tmp9, tmp16) tmp21 = tmp2 + tmp20 tmp22 = tmp21 * tmp5 tmp23 = tl.where(tmp19, tmp21, tmp22) tmp24 = tl.where(tmp18, tmp23, tmp8) tmp25 = triton_helpers.maximum(tmp17, tmp24) tmp29 = tmp2 + tmp28 tmp30 = tmp29 * tmp5 tmp31 = tl.where(tmp27, tmp29, tmp30) tmp32 = tl.where(tmp26, tmp31, tmp8) tmp33 = triton_helpers.maximum(tmp25, tmp32) tl.store(out_ptr0 + x2, tmp33, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x3, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused__log_softmax_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp8 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tmp8 > tmp1 tmp10 = tmp8 * tmp3 tmp11 = libdevice.expm1(tmp10) tmp12 = tmp11 * tmp3 tmp13 = tl.where(tmp9, tmp10, tmp12) tmp15 = tmp14 > tmp1 tmp16 = tmp14 * tmp3 tmp17 = libdevice.expm1(tmp16) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp15, tmp16, tmp18) tmp20 = triton_helpers.maximum(tmp13, tmp19) tmp22 = tmp21 > tmp1 tmp23 = tmp21 * tmp3 tmp24 = libdevice.expm1(tmp23) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp22, tmp23, tmp25) tmp27 = triton_helpers.maximum(tmp20, tmp26) tmp29 = tmp28 > tmp1 tmp30 = tmp28 * tmp3 tmp31 = libdevice.expm1(tmp30) tmp32 = tmp31 * tmp3 tmp33 = tl.where(tmp29, tmp30, tmp32) tmp34 = triton_helpers.maximum(tmp27, tmp33) tmp35 = tmp7 - tmp34 tl.store(out_ptr0 + x3, tmp35, xmask) @triton.jit def triton_poi_fused__log_softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (8, 1), (1, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 1), (1, 1 ), 0), out=buf1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 1), (1, 1 ), 4), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_leaky_relu_0[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_gt_1[grid(64)](primals_2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf9) del primals_5 buf10 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1), (1, 1 ), 0), out=buf10) buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1), (1, 1 ), 4), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(64)](buf10, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf17) del primals_7 buf18 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1), (1, 1), 0), out=buf18) buf19 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1), (1, 1), 4), out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) 
triton_poi_fused_add_leaky_relu_0[grid(64)](buf18, buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf25) del primals_9 buf26 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1), (1, 1), 0), out=buf26) buf27 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1), (1, 1), 4), out=buf27) buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(64)](buf26, buf27, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf21 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf29 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(16)](buf4, buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19, buf28, buf26, buf27, buf5, buf13, buf21, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(64)](buf4, buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20, buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14, buf22, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del buf10 del buf11 del buf13 del buf18 del buf19 del buf2 del buf21 del buf26 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf6 del buf6 extern_kernels.bmm(buf7, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf8) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf14, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = buf14 del buf14 extern_kernels.bmm(buf15, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), out=buf16) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf22, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) buf24 = buf22 del buf22 extern_kernels.bmm(buf23, reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0), out=buf24) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) buf32 = buf30 del buf30 extern_kernels.bmm(buf31, reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0), out=buf32) buf33 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_5[grid(256)](buf8, buf16, buf24, buf32, buf33, 256, XBLOCK=256, num_warps=4, num_stages=1) buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf33, (16, 16), (16, 1), 0), primals_11, out=buf34) buf35 = reinterpret_tensor(buf5, (16, 1), (1, 1), 0) del buf5 extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1), (1, 1), 0), out=buf35) buf36 = reinterpret_tensor(buf29, (16, 1), (1, 1), 0) del buf29 extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1), (1, 1), 4), out=buf36) buf37 = empty_strided_cuda((4, 4, 4), 
(16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(64)](buf35, buf36, buf37, 64, XBLOCK=64, num_warps=1, num_stages=1) buf38 = reinterpret_tensor(buf27, (4, 4, 1), (4, 1, 16), 0) del buf27 triton_poi_fused__softmax_add_leaky_relu_mul_where_6[grid(16)](buf4, buf37, buf35, buf36, buf38, 16, XBLOCK=16, num_warps=1, num_stages=1) buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_7[grid(64)](buf4, buf37, buf35, buf36, buf38, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf35 del buf36 del buf38 buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf39, buf40, 64, XBLOCK=64, num_warps=1, num_stages=1) buf41 = buf39 del buf39 extern_kernels.bmm(buf40, reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0), out=buf41) buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_elu_8[grid(64)](buf41, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_9[grid(64)](buf42, buf43, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf42 return (buf43, buf0, buf3, buf4, buf7, buf8, buf9, buf12, buf15, buf16, buf17, buf20, buf23, buf24, buf25, buf28, buf31, buf32, buf34, buf37, buf40, buf41, buf43, reinterpret_tensor(primals_12, (1, 4), (1, 1), 4), reinterpret_tensor(primals_12, (1, 4), (1, 1), 0), reinterpret_tensor(buf33, (16, 16), (1, 16), 0), reinterpret_tensor (primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 4), reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 4), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), reinterpret_tensor(primals_6, (1, 4), (1, 1), 4), reinterpret_tensor(primals_6, (1, 4), (1, 1), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 4), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ https://github.com/Diego999/pyGAT/blob/master/layers.py Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, batch_num, node_num, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.batch_num = batch_num self.node_num = node_num torch.device('cuda') self.W = nn.Parameter(torch.empty(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, h, adj): """ :param h: (batch_size, number_nodes, in_features) :param adj: (batch_size, number_nodes, number_nodes) :return: (batch_size, number_nodes, out_features) """ Wh = torch.matmul(h, self.W) e = self.prepare_batch(Wh) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=-1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, Wh) if self.concat: return F.elu(h_prime) else: return h_prime def prepare_batch(self, Wh): """ with batch training :param Wh: (batch_size, number_nodes, out_features) :return: """ _B, _N, _E = Wh.shape Wh1 = torch.matmul(Wh, self.a[:self.out_features, :]) Wh2 = 
torch.matmul(Wh, self.a[self.out_features:, :]) e = Wh1 + Wh2.permute(0, 2, 1) return self.leakyrelu(e) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, in_feat_dim, nhid, out_feat_dim, dropout, alpha, nheads, batch_num, node_num): """Dense version of GAT.""" super(GATNew, self).__init__() self.dropout = dropout self.batch_num = batch_num self.node_num = node_num torch.device('cuda') self.adj = nn.Parameter(torch.ones(self.batch_num, self.node_num, self.node_num)) self.attentions = [GraphAttentionLayer(in_feat_dim, nhid, dropout= dropout, alpha=alpha, batch_num=batch_num, node_num=node_num, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttentionLayer(nhid * nheads, out_feat_dim, dropout=dropout, alpha=alpha, batch_num=batch_num, node_num= node_num, concat=False) def forward(self, input_0): primals_1 = self.adj primals_3 = self.attention_0.W primals_4 = self.attention_0.a primals_5 = self.attention_1.W primals_6 = self.attention_1.a primals_7 = self.attention_2.W primals_8 = self.attention_2.a primals_9 = self.attention_3.W primals_10 = self.attention_3.a primals_11 = self.out_att.W primals_12 = self.out_att.a primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
bpilseo/VLP_GAT
GAT
false
1,623
[ "MIT" ]
0
ca8a2594036ab8fe4a180e5ced87f59f8984e54f
https://github.com/bpilseo/VLP_GAT/tree/ca8a2594036ab8fe4a180e5ced87f59f8984e54f
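An eager-mode instantiation sketch built from the row's get_init_inputs(), assuming the GAT class above is in scope; not from the source repo. Note that adj is a learnable parameter initialised to all ones, so the adj > 0 mask keeps every edge at initialisation, and the fixed batch_num/node_num tie the module to a single input shape.

import torch

model = GAT(in_feat_dim=4, nhid=4, out_feat_dim=4, dropout=0.5,
    alpha=4, nheads=4, batch_num=4, node_num=4).eval()  # eval(): dropout off
x = torch.rand(4, 4, 4)                  # (batch_num, node_num, in_feat_dim)
out = model(x)                           # shape (4, 4, 4)
print(out.shape, out.exp().sum(dim=1))   # log_softmax is over dim=1 (the node axis)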
RemoveChannelMeanStd
import torch class RemoveChannelMeanStd(torch.nn.Module): def forward(self, x): x2 = x.view(x.size(0), x.size(1), -1) mean = x2.mean(dim=2).view(x.size(0), x.size(1), 1, 1) std = x2.std(dim=2).view(x.size(0), x.size(1), 1, 1) return (x - mean) / std def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = tmp0 - tmp20 tmp22 = 15.0 tmp23 = tmp18 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp21 / tmp24 tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mean_std_sub_0[grid(16)](arg0_1, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return buf4, class RemoveChannelMeanStdNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
cadurosar/graph_kd_dense_cifar100
RemoveChannelMeanStd
false
1,624
[ "MIT" ]
0
84054ab4f8f61c9db3460993661ba7bf1d951b36
https://github.com/cadurosar/graph_kd_dense_cifar100/tree/84054ab4f8f61c9db3460993661ba7bf1d951b36
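A quick sanity check for the row above, assuming the eager RemoveChannelMeanStd class is in scope: every (batch, channel) slice should come out with near-zero mean and unit standard deviation. std() defaults to the unbiased (n-1) estimator, which is why the fused kernel divides the squared deviations by 15.0 for the 16-element slices of a 4x4x4x4 input.

import torch

x = torch.rand(4, 4, 4, 4)
y = RemoveChannelMeanStd()(x)
flat = y.view(4, 4, -1)
print(flat.mean(dim=2).abs().max())  # ~0: per-slice mean removed
print(flat.std(dim=2))               # ~1: per-slice (unbiased) std divided out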
BERTEmbedding2
import torch import torch.nn as nn from itertools import chain as chain import torch.utils.data import torch.hub import torch.nn.parallel import torch.optim class LearnedPositionalEmbedding2(nn.Module): def __init__(self, d_model, max_len=512): super().__init__() pe = torch.zeros(max_len, d_model).float() pe.require_grad = True pe = pe.unsqueeze(0) self.pe = nn.Parameter(pe) torch.nn.init.normal_(self.pe, std=0.02) def forward(self, x): return self.pe[:, :x.size(1)] class BERTEmbedding2(nn.Module): """ BERT Embedding which is consisted with under features 1. PositionalEmbedding : adding positional information using sin, cos sum of all these features are output of BERTEmbedding """ def __init__(self, input_dim, max_len, dropout=0.1): """ :param vocab_size: total vocab size :param embed_size: embedding size of token embedding :param dropout: dropout rate """ super().__init__() self.learnedPosition = LearnedPositionalEmbedding2(d_model= input_dim, max_len=max_len) self.dropout = nn.Dropout(p=dropout) def forward(self, sequence): x = self.learnedPosition(sequence) + sequence return self.dropout(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'max_len': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import chain as chain import torch.utils.data import torch.hub import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LearnedPositionalEmbedding2(nn.Module): def __init__(self, d_model, max_len=512): super().__init__() pe = torch.zeros(max_len, d_model).float() pe.require_grad = True pe = pe.unsqueeze(0) self.pe = nn.Parameter(pe) torch.nn.init.normal_(self.pe, std=0.02) def forward(self, x): return self.pe[:, :x.size(1)] class BERTEmbedding2New(nn.Module): """ BERT Embedding which is consisted with under features 1. PositionalEmbedding : adding positional information using sin, cos sum of all these features are output of BERTEmbedding """ def __init__(self, input_dim, max_len, dropout=0.1): """ :param vocab_size: total vocab size :param embed_size: embedding size of token embedding :param dropout: dropout rate """ super().__init__() self.learnedPosition = LearnedPositionalEmbedding2(d_model= input_dim, max_len=max_len) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0): primals_1 = self.learnedPosition.pe primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
byeongjokim/LateTemporalModeling3DCNN_for_sign
BERTEmbedding2
false
1,625
[ "MIT" ]
0
e3a802fcf91dc3930aea782464ee34d9b747d3ab
https://github.com/byeongjokim/LateTemporalModeling3DCNN_for_sign/tree/e3a802fcf91dc3930aea782464ee34d9b747d3ab
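A minimal usage sketch (not part of the record above): the record's get_init_inputs / get_inputs conventions drive a parity check between the eager BERTEmbedding2 and the compiled BERTEmbedding2New. It assumes a CUDA device, since the generated kernel is CUDA-only, and eval mode so the Dropout is the identity.

import torch

init_args, init_kwargs = get_init_inputs()   # [[], {'input_dim': 4, 'max_len': 4}]
inputs = [t.cuda() for t in get_inputs()]    # [torch.rand([4, 4, 4, 4])]

eager = BERTEmbedding2(*init_args, **init_kwargs).cuda().eval()
fused = BERTEmbedding2New(*init_args, **init_kwargs).cuda().eval()
fused.load_state_dict(eager.state_dict())    # share the learned positional table

with torch.no_grad():
    torch.testing.assert_close(eager(*inputs), fused(*inputs))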
PositionwiseFeedForward
import math import torch import torch.nn as nn from itertools import chain as chain import torch.utils.data import torch.hub import torch.nn.parallel import torch.optim class GELU(nn.Module): """ Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU """ def forward(self, x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class PositionwiseFeedForward(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) self.activation = GELU() def forward(self, x): return self.w_2(self.dropout(self.activation(self.w_1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn from itertools import chain as chain import torch.utils.data import torch.hub import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 class GELU(nn.Module): """ Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU """ def forward(self, x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class PositionwiseFeedForwardNew(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) self.activation = GELU() def forward(self, input_0): primals_1 = self.w_1.weight primals_2 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
byeongjokim/LateTemporalModeling3DCNN_for_sign
PositionwiseFeedForward
false
1,626
[ "MIT" ]
0
e3a802fcf91dc3930aea782464ee34d9b747d3ab
https://github.com/byeongjokim/LateTemporalModeling3DCNN_for_sign/tree/e3a802fcf91dc3930aea782464ee34d9b747d3ab
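The GELU docstring above contrasts the tanh approximation with the erf-based form; a small sketch, assuming nothing beyond stock PyTorch, that measures how close the two are on a sample range.

import math
import torch

x = torch.linspace(-4.0, 4.0, steps=101)
gelu_tanh = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
gelu_erf = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
print((gelu_tanh - gelu_erf).abs().max())  # small; the tanh form tracks erf closely on this range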
Evidential_layer
import torch import torch.nn as nn class Evidential_layer(nn.Module): def __init__(self, in_dim, num_classes): super(Evidential_layer, self).__init__() self.num_classes = num_classes self.fc1 = nn.Linear(in_dim, 2 * self.num_classes) self.relu = torch.nn.ReLU() def forward(self, x): x = self.fc1(x) return self.relu(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_2, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class Evidential_layerNew(nn.Module): def __init__(self, in_dim, num_classes): super(Evidential_layerNew, self).__init__() self.num_classes = num_classes self.fc1 = nn.Linear(in_dim, 2 * self.num_classes) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
caisr-hh/DEED
Evidential_layer
false
1,627
[ "MIT" ]
0
2a9edb1df31d99c1e8da177dec696d7c90c2e7de
https://github.com/caisr-hh/DEED/tree/2a9edb1df31d99c1e8da177dec696d7c90c2e7de
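A quick shape check using only the eager module above: fc1 maps in_dim to 2 * num_classes (matching the (4, 4, 4, 8) buffers in the compiled code), and the ReLU keeps the evidence values non-negative.

import torch

layer = Evidential_layer(in_dim=4, num_classes=4)
out = layer(torch.rand([4, 4, 4, 4]))
print(out.shape)               # torch.Size([4, 4, 4, 8]) -- last dim is 2 * num_classes
print(bool((out >= 0).all()))  # True: ReLU guarantees non-negative evidence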
NextSentencePrediction
import torch import torch.nn as nn from itertools import chain as chain import torch.utils.data import torch.hub import torch.nn.parallel import torch.optim class NextSentencePrediction(nn.Module): """ 2-class classification model : is_next, is_not_next """ def __init__(self, hidden): """ :param hidden: BERT model output size """ super().__init__() self.linear = nn.Linear(hidden, 2) self.softmax = nn.LogSoftmax(dim=-1) def forward(self, x): return self.softmax(self.linear(x[:, 0])) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from itertools import chain as chain import torch.utils.data import torch.hub import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp6 = tmp3 + tmp5 tmp10 = tmp7 + tmp9 tmp11 = triton_helpers.maximum(tmp6, tmp10) tmp12 = tmp2 - tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0) del buf1 triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf2 return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3 class NextSentencePredictionNew(nn.Module): """ 2-class classification model : is_next, is_not_next """ def __init__(self, hidden): """ :param hidden: BERT model output size """ super().__init__() self.linear = nn.Linear(hidden, 2) self.softmax = nn.LogSoftmax(dim=-1) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
byeongjokim/LateTemporalModeling3DCNN_for_sign
NextSentencePrediction
false
1,628
[ "MIT" ]
0
e3a802fcf91dc3930aea782464ee34d9b747d3ab
https://github.com/byeongjokim/LateTemporalModeling3DCNN_for_sign/tree/e3a802fcf91dc3930aea782464ee34d9b747d3ab
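A small sketch, using only the eager module above, illustrating two properties of this head: the LogSoftmax output exponentiates to a proper 2-way distribution, and only the first token x[:, 0] influences the result.

import torch

head = NextSentencePrediction(hidden=4)
x = torch.rand([4, 4, 4, 4])
log_probs = head(x)   # shape (4, 4, 2), computed from x[:, 0] only
print(torch.allclose(log_probs.exp().sum(-1), torch.ones(4, 4)))  # True: rows sum to 1
x2 = x.clone()
x2[:, 1:] = 0.0       # change everything except the first token
print(torch.equal(head(x2), log_probs))  # True: only x[:, 0] matters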
BatchMeanKLDivWithLogSoftmax
import torch import torch.nn as nn class BatchMeanKLDivWithLogSoftmax(nn.Module): def forward(self, p, log_q, log_p): return (p * log_p - p * log_q).sum(dim=1).mean(dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp0 * tmp3 tmp5 = tmp2 - tmp4 tmp8 = tmp6 * tmp7 tmp10 = tmp6 * tmp9 tmp11 = tmp8 - tmp10 tmp12 = tmp5 + tmp11 tmp15 = tmp13 * tmp14 tmp17 = tmp13 * tmp16 tmp18 = tmp15 - tmp17 tmp19 = tmp12 + tmp18 tmp22 = tmp20 * tmp21 tmp24 = tmp20 * tmp23 tmp25 = tmp22 - tmp24 tmp26 = tmp19 + tmp25 tl.store(out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sub_sum_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mean_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 return buf1, class BatchMeanKLDivWithLogSoftmaxNew(nn.Module): def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
cadurosar/graph_kd_dense_cifar100
BatchMeanKLDivWithLogSoftmax
false
1,629
[ "MIT" ]
0
84054ab4f8f61c9db3460993661ba7bf1d951b36
https://github.com/cadurosar/graph_kd_dense_cifar100/tree/84054ab4f8f61c9db3460993661ba7bf1d951b36
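Assuming callers pass log_p = p.log(), the expression above is the pointwise KL integrand; a sketch checking it against F.kl_div, which computes target * (log(target) - input) elementwise.

import torch
import torch.nn.functional as F

p = torch.softmax(torch.randn(4, 4, 4, 4), dim=1)          # a proper distribution over dim 1
log_q = torch.log_softmax(torch.randn(4, 4, 4, 4), dim=1)
manual = BatchMeanKLDivWithLogSoftmax()(p, log_q, p.log())
reference = F.kl_div(log_q, p, reduction='none').sum(dim=1).mean(dim=0)
print(torch.allclose(manual, reference))                    # True when log_p is exactly p.log()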
BWCEWLoss
import torch from torch import Tensor from typing import Optional from torch import nn class BWCEWLoss(nn.Module): """ Binary weighted cross entropy loss. """ def __init__(self, positive_class_weight: 'Optional[Tensor]'=None, robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs): super().__init__() self.loss_fn = nn.BCEWithLogitsLoss(pos_weight= positive_class_weight, **kwargs) self.robust_lambda = robust_lambda self.confidence_penalty = confidence_penalty def forward(self, preds: 'torch.Tensor', target: 'torch.Tensor'): train_loss = self.loss_fn(preds, target.float()) if self.robust_lambda > 0: train_loss = (1 - self.robust_lambda ) * train_loss + self.robust_lambda / 2 train_mean_loss = torch.mean(train_loss) if self.confidence_penalty > 0: probabilities = torch.sigmoid(preds) mean_penalty = utils.mean_confidence_penalty(probabilities, 2) train_mean_loss += self.confidence_penalty * mean_penalty return train_mean_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import Tensor from typing import Optional from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 / tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BWCEWLossNew(nn.Module): """ Binary weighted cross entropy loss. """ def __init__(self, positive_class_weight: 'Optional[Tensor]'=None, robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs): super().__init__() self.loss_fn = nn.BCEWithLogitsLoss(pos_weight= positive_class_weight, **kwargs) self.robust_lambda = robust_lambda self.confidence_penalty = confidence_penalty def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
carlogrisetti/ludwig
BWCEWLoss
false
1,630
[ "Apache-2.0" ]
0
5c0887f14867e1577e0ddc3806c5cf7a781fb665
https://github.com/carlogrisetti/ludwig/tree/5c0887f14867e1577e0ddc3806c5cf7a781fb665
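With the default robust_lambda=0 and confidence_penalty=0, the wrapper above reduces to plain BCEWithLogitsLoss; note the penalty branch references a `utils` helper that is not imported in this snippet, so the defaults are also what keeps the record self-contained. A minimal check:

import torch
from torch import nn

preds = torch.rand([4, 4, 4, 4])    # logits
target = torch.rand([4, 4, 4, 4])   # soft targets, as in get_inputs()
wrapped = BWCEWLoss()(preds, target)
plain = nn.BCEWithLogitsLoss()(preds, target.float())
print(torch.allclose(wrapped, plain))  # True with the default constructor arguments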
ConvLayer
import torch class ConvLayer(torch.nn.Module): def __init__(self, in_features, out_features, kernel_size=1, stride=1): super().__init__() padding = kernel_size // 2 self.refpadding = torch.nn.ReflectionPad2d(padding) self.conv = torch.nn.Conv2d(in_features, out_features, kernel_size, stride) def forward(self, x): x = self.refpadding(x) return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math .abs(-3 + x1) + 16 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class ConvLayerNew(torch.nn.Module): def __init__(self, in_features, out_features, kernel_size=1, stride=1): super().__init__() padding = kernel_size // 2 self.refpadding = torch.nn.ReflectionPad2d(padding) self.conv = torch.nn.Conv2d(in_features, out_features, kernel_size, stride) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
bruchano/GTAVSelfDriving
ConvLayer
false
1,631
[ "MIT" ]
0
c5f929f793b48e47725dbdb8f0991e04e3c43eba
https://github.com/bruchano/GTAVSelfDriving/tree/c5f929f793b48e47725dbdb8f0991e04e3c43eba
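A shape sketch for the layer above, assuming odd kernel sizes as implied by padding = kernel_size // 2: the reflection padding exactly offsets the kernel, so spatial dimensions are preserved (for kernel_size=1 the pad is a no-op).

import torch

for k in (1, 3, 5):
    layer = ConvLayer(in_features=4, out_features=4, kernel_size=k)
    out = layer(torch.rand([4, 4, 8, 8]))
    print(k, tuple(out.shape))  # spatial size stays 8x8 for every odd k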
ReduceMax
import torch class ReduceMax(torch.nn.Module): def forward(self, inputs, mask=None): return torch.amax(inputs, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_amax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_amax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class ReduceMaxNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
carlogrisetti/ludwig
ReduceMax
false
1,632
[ "Apache-2.0" ]
0
5c0887f14867e1577e0ddc3806c5cf7a781fb665
https://github.com/carlogrisetti/ludwig/tree/5c0887f14867e1577e0ddc3806c5cf7a781fb665
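A one-line sanity check, using only the eager module above: the reduction is torch.amax over dim 1, and the mask argument is ignored.

import torch

x = torch.rand([4, 4, 4, 4])
print(torch.equal(ReduceMax()(x), x.amax(dim=1)))  # True; mask is unused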
SingleLayer
import torch import torch.nn as nn class SingleLayer(nn.Module): def __init__(self, nChannels, growthRate): super(SingleLayer, self).__init__() self.bn1 = nn.GroupNorm(nChannels, nChannels, affine=True) self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(self.relu(self.bn1(x))) out = torch.cat((x, out), 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nChannels': 4, 'growthRate': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_0[grid(16)](primals_3, primals_1, primals_2, buf0, buf3, buf6, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](primals_3, buf4, buf5, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf4 return buf5, primals_3, primals_4, buf3, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 1), 0) class SingleLayerNew(nn.Module): def __init__(self, nChannels, growthRate): super(SingleLayerNew, self).__init__() self.bn1 = nn.GroupNorm(nChannels, nChannels, affine=True) self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.bn1.weight primals_2 = self.bn1.bias primals_4 = self.conv1.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
cadurosar/graph_kd_dense_cifar100
SingleLayer
false
1,633
[ "MIT" ]
0
84054ab4f8f61c9db3460993661ba7bf1d951b36
https://github.com/cadurosar/graph_kd_dense_cifar100/tree/84054ab4f8f61c9db3460993661ba7bf1d951b36
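A quick shape check for the DenseNet-style layer above: the forward concatenates the input with growthRate new feature maps, so channels grow from nChannels to nChannels + growthRate.

import torch

layer = SingleLayer(nChannels=4, growthRate=4)
out = layer(torch.rand([4, 4, 4, 4]))
print(out.shape)  # torch.Size([4, 8, 4, 4]): input concatenated with growthRate new maps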
TransformerLayer
import math import torch import uuid from torch import Tensor import torch.nn as nn from typing import Tuple import torch.nn.functional as F from typing import Optional from typing import Dict from torch.nn import Parameter def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32) def with_incremental_state(cls): cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls. __bases__ if b != FairseqIncrementalState) return cls class ESM1LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12, affine=True): """Construct a layernorm layer in the TF style (eps inside the sqrt).""" super().__init__() self.hidden_size = (hidden_size,) if isinstance(hidden_size, int ) else tuple(hidden_size) self.eps = eps self.affine = bool(affine) if self.affine: self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) else: self.weight, self.bias = None, None def forward(self, x): dims = tuple(-(i + 1) for i in range(len(self.hidden_size))) means = x.mean(dims, keepdim=True) x_zeromean = x - means variances = x_zeromean.pow(2).mean(dims, keepdim=True) x = x_zeromean / torch.sqrt(variances + self.eps) if self.affine: x = self.weight * x + self.bias return x class FairseqIncrementalState(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_incremental_state() def init_incremental_state(self): self._incremental_state_id = str(uuid.uuid4()) def _get_full_incremental_state_key(self, key: 'str') ->str: return '{}.{}'.format(self._incremental_state_id, key) def get_incremental_state(self, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str' ) ->Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" full_key = self._get_full_incremental_state_key(key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state(self, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str', value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = self._get_full_incremental_state_key(key) incremental_state[full_key] = value return incremental_state @with_incremental_state class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. 
""" def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout= 0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and value to be of the same size' self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias) self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.enable_torch_version = False if hasattr(F, 'multi_head_attention_forward'): self.enable_torch_version = True else: self.enable_torch_version = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward(self, query, key: 'Optional[Tensor]', value: 'Optional[Tensor]', key_padding_mask: 'Optional[Tensor]'=None, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]'=None, need_weights: 'bool'=True, static_kv: 'bool'=False, attn_mask: 'Optional[Tensor]'=None, before_softmax: 'bool'=False, need_head_weights: 'bool'=False) ->Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. 
""" if need_head_weights: need_weights = True tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if (self.enable_torch_version and not self.onnx_trace and incremental_state is None and not static_kv and not torch.jit. is_scripting() and not need_head_weights): assert key is not None and value is not None return F.multi_head_attention_forward(query, key, value, self. embed_dim, self.num_heads, torch.empty([0]), torch.cat(( self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj. weight, k_proj_weight=self.k_proj.weight, v_proj_weight= self.v_proj.weight) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and 'prev_key' in saved_state: if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) if key_padding_mask is not None: key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1) ], dim=1) q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if saved_state is not None: if 'prev_key' in saved_state: _prev_key = saved_state['prev_key'] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self. head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if 'prev_value' in saved_state: _prev_value = saved_state['prev_value'] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: 'Optional[Tensor]' = None if 'prev_key_padding_mask' in saved_state: prev_key_padding_mask = saved_state['prev_key_padding_mask'] assert k is not None and v is not None key_padding_mask = (MultiheadAttention. _append_prev_key_padding_mask(key_padding_mask= key_padding_mask, prev_key_padding_mask= prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv)) saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self. 
head_dim) saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state['prev_key_padding_mask'] = key_padding_mask assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) if key_padding_mask is not None: key_padding_mask = torch.cat([key_padding_mask, torch.zeros (key_padding_mask.size(0), 1).type_as(key_padding_mask) ], dim=1) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill(key_padding_mask. unsqueeze(1).unsqueeze(2), float('-inf')) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace =self.onnx_trace) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p= self.dropout, training=self.training) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self. head_dim] if self.onnx_trace and attn.size(1) == 1: attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: 'Optional[Tensor]' = None if need_weights: attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) if not need_head_weights: attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask(key_padding_mask: 'Optional[Tensor]', prev_key_padding_mask: 'Optional[Tensor]', batch_size: 'int', src_len: 'int', static_kv: 'bool') ->Optional[Tensor]: if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), key_padding_mask.float()], dim=1) elif prev_key_padding_mask is not None: filler = torch.zeros((batch_size, src_len - prev_key_padding_mask.size(1)), device= prev_key_padding_mask.device) new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), filler.float()], dim=1) elif key_padding_mask is not None: filler = torch.zeros((batch_size, src_len - key_padding_mask. 
size(1)), device=key_padding_mask.device) new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state(self, incremental_state: 'Dict[str, Dict[str, Optional[Tensor]]]', new_order: 'Tensor'): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size(0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer(self, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]') ->Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, 'attn_state') if result is not None: return result else: empty_result: 'Dict[str, Optional[Tensor]]' = {} return empty_result def _set_input_buffer(self, incremental_state: 'Dict[str, Dict[str, Optional[Tensor]]]', buffer: 'Dict[str, Optional[Tensor]]'): return self.set_incremental_state(incremental_state, 'attn_state', buffer) def apply_sparse_mask(attn_weights, tgt_len: 'int', src_len: 'int', bsz: 'int'): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + '.' if name != '' else '' items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + 'in_proj_weight'): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim] items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim: 2 * dim] items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2 * dim: ] keys_to_remove.append(k) k_bias = prefix + 'in_proj_bias' if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][: dim] items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][ dim:2 * dim] items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][ 2 * dim:] keys_to_remove.append(prefix + 'in_proj_bias') for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value class TransformerLayer(nn.Module): """Transformer layer block.""" def __init__(self, embed_dim, ffn_embed_dim, attention_heads, add_bias_kv=True, use_esm1b_layer_norm=False): super().__init__() self.embed_dim = embed_dim self.ffn_embed_dim = ffn_embed_dim self.attention_heads = attention_heads self._init_submodules(add_bias_kv, use_esm1b_layer_norm) def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm): BertLayerNorm = (ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm) self.self_attn = MultiheadAttention(self.embed_dim, self. 
attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False) self.self_attn_layer_norm = BertLayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim) self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim) self.final_layer_norm = BertLayerNorm(self.embed_dim) def forward(self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False): residual = x x = self.self_attn_layer_norm(x) x, attn = self.self_attn(query=x, key=x, value=x, key_padding_mask= self_attn_padding_mask, need_weights=True, need_head_weights= need_head_weights, attn_mask=self_attn_mask) x = residual + x residual = x x = self.final_layer_norm(x) x = gelu(self.fc1(x)) x = self.fc2(x) x = residual + x return x, attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4, 'ffn_embed_dim': 4, 'attention_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import uuid from torch import Tensor import torch.nn as nn from typing import Tuple import torch.nn.functional as F from typing import Optional from typing import Dict from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16
    x3 = xindex % 16
    x0 = xindex % 4
    x4 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x4, tmp10, xmask)


@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = x2 % 4
    tl.full([1], 0, tl.int64)
    tmp4 = tl.full([1], 4, tl.int64)
    tmp5 = tmp1 < tmp4
    tmp6 = tl.load(in_ptr0 + x0 % 4, tmp5 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp1 >= tmp4
    tmp8 = tl.full([1], 8, tl.int64)
    tmp9 = tmp1 < tmp8
    tmp10 = tmp7 & tmp9
    tmp11 = tl.load(in_ptr1 + (-4 + x0 % 4), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp1 >= tmp8
    tl.full([1], 12, tl.int64)
    tmp15 = tl.load(in_ptr2 + (-8 + x0 % 4), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp10, tmp11, tmp15)
    tmp17 = tl.where(tmp5, tmp6, tmp16)
    tmp18 = tmp0 + tmp17
    tmp19 = 1.0
    tmp20 = tmp18 * tmp19
    tl.store(in_out_ptr0 + x2, tmp20, xmask)


@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp0 - tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp1 - tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tl_math.exp(tmp14)
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tl_math.exp(tmp17)
    tmp19 = tmp16 + tmp18
    tmp20 = tmp7 - tmp8
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp19 + tmp21
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp22, xmask)


@triton.jit
def triton_poi_fused__softmax_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 5
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tmp3 / tmp4
    tl.store(in_out_ptr0 + x2, tmp5, xmask)


@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_mean_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 20
    x1 = xindex // 20
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 80 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (20 + x0 + 80 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (40 + x0 + 80 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (60 + x0 + 80 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_mean_pow_sub_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x2, xmask)
    tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 - tmp4
    tmp7 = 1e-12
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tmp10 = tmp5 / tmp9
    tmp11 = tmp0 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_add_div_erf_mul_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865475
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_out_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tl.store(in_out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_8, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4, 4), (4, 1))
    assert_size_stride(primals_17, (4,), (1,))
    assert_size_stride(primals_18, (4, 4), (4, 1))
    assert_size_stride(primals_19, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_sub_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(64)](primals_2, buf0, primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        del primals_3
        buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
        del buf0
        extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((12,), (1,), torch.float32)
        triton_poi_fused_cat_2[grid(12)](primals_4, primals_5, primals_6, buf3, 12, XBLOCK=16, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(buf3, (4,), (1,), 4), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(buf3, (4,), (1,), 8), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
        del buf3
        buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_cat_3[grid(80)](buf5, primals_8, buf6, 80, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_8
        buf7 = reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 64), 0)
        del buf2
        triton_poi_fused_mul_4[grid(64)](buf7, primals_4, primals_5, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
        del primals_5
        del primals_6
        buf8 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_cat_3[grid(80)](buf4, primals_7, buf8, 80, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
        buf9 = empty_strided_cuda((16, 4, 5), (20, 5, 1), torch.float32)
        extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 1, 5), (1, 0, 16), 0), out=buf9)
        buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 64), 0)
        del buf4
        buf11 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 64), 0)
        del buf5
        triton_poi_fused__softmax_5[grid(64)](buf9, buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf12 = buf9
        del buf9
        triton_poi_fused__softmax_6[grid(320)](buf12, buf10, buf11, 320, XBLOCK=256, num_warps=4, num_stages=1)
        buf13 = reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 1), 0)
        del buf11
        extern_kernels.bmm(buf12, reinterpret_tensor(buf6, (16, 5, 1), (1, 16, 0), 0), out=buf13)
        buf14 = reinterpret_tensor(buf10, (4, 16, 1), (16, 1, 1), 0)
        del buf10
        triton_poi_fused_clone_7[grid(4, 16)](buf13, buf14, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
        del buf13
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
        del primals_10
        buf16 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
        triton_poi_fused_mean_8[grid(80)](buf12, buf16, 80, XBLOCK=128, num_warps=4, num_stages=1)
        buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_mean_pow_sub_9[grid(16)](primals_1, buf15, buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_sqrt_sub_10[grid(64)](primals_14, primals_1, buf15, buf17, buf18, primals_15, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf17
        del buf18
        del primals_15
        buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf20)
        del primals_17
        buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_erf_mul_11[grid(64)](buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf22)
        buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0)
        del buf22
        triton_poi_fused_add_12[grid(64)](buf23, primals_1, buf15, primals_19, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_19
    return (buf23, buf16, primals_1, primals_14, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), primals_18, primals_16, primals_9, reinterpret_tensor(buf6, (16, 1, 5), (1, 1, 16), 0), reinterpret_tensor(buf7, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf8, (16, 5, 1), (1, 16, 1), 0), primals_13, primals_12, primals_11)


def gelu(x):
    """Implementation of the gelu activation function.

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False):
    if onnx_trace:
        return F.softmax(x.float(), dim=dim)
    else:
        return F.softmax(x, dim=dim, dtype=torch.float32)


def with_incremental_state(cls):
    cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.__bases__ if b != FairseqIncrementalState)
    return cls


class ESM1LayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12, affine=True):
        """Construct a layernorm layer in the TF style (eps inside the sqrt)."""
        super().__init__()
        self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
        self.eps = eps
        self.affine = bool(affine)
        if self.affine:
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
        else:
            self.weight, self.bias = None, None

    def forward(self, x):
        dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
        means = x.mean(dims, keepdim=True)
        x_zeromean = x - means
        variances = x_zeromean.pow(2).mean(dims, keepdim=True)
        x = x_zeromean / torch.sqrt(variances + self.eps)
        if self.affine:
            x = self.weight * x + self.bias
        return x


class FairseqIncrementalState(object):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.init_incremental_state()

    def init_incremental_state(self):
        self._incremental_state_id = str(uuid.uuid4())

    def _get_full_incremental_state_key(self, key: 'str') ->str:
        return '{}.{}'.format(self._incremental_state_id, key)

    def get_incremental_state(self, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str') ->Optional[Dict[str, Optional[Tensor]]]:
        """Helper for getting incremental state for an nn.Module."""
        full_key = self._get_full_incremental_state_key(key)
        if incremental_state is None or full_key not in incremental_state:
            return None
        return incremental_state[full_key]

    def set_incremental_state(self, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str', value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
        """Helper for setting incremental state for an nn.Module."""
        if incremental_state is not None:
            full_key = self._get_full_incremental_state_key(key)
            incremental_state[full_key] = value
        return incremental_state


@with_incremental_state
class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and value to be of the same size'
        self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        self.onnx_trace = False
        self.enable_torch_version = False
        if hasattr(F, 'multi_head_attention_forward'):
            self.enable_torch_version = True
        else:
            self.enable_torch_version = False

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def reset_parameters(self):
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(self, query, key: 'Optional[Tensor]', value: 'Optional[Tensor]', key_padding_mask: 'Optional[Tensor]'=None, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]'=None, need_weights: 'bool'=True, static_kv: 'bool'=False, attn_mask: 'Optional[Tensor]'=None, before_softmax: 'bool'=False, need_head_weights: 'bool'=False) ->Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        if (self.enable_torch_version and not self.onnx_trace and incremental_state is None and not static_kv and not torch.jit.is_scripting() and not need_head_weights):
            assert key is not None and value is not None
            return F.multi_head_attention_forward(query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight)
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and 'prev_key' in saved_state:
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        if self.self_attention:
            q = self.q_proj(query)
            k = self.k_proj(query)
            v = self.v_proj(query)
        elif self.encoder_decoder_attention:
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        q *= self.scaling
        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if saved_state is not None:
            if 'prev_key' in saved_state:
                _prev_key = saved_state['prev_key']
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if 'prev_value' in saved_state:
                _prev_value = saved_state['prev_value']
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: 'Optional[Tensor]' = None
            if 'prev_key_padding_mask' in saved_state:
                prev_key_padding_mask = saved_state['prev_key_padding_mask']
            assert k is not None and v is not None
            key_padding_mask = (MultiheadAttention._append_prev_key_padding_mask(key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv))
            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_key_padding_mask'] = key_padding_mask
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        src_len = k.size(1)
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            assert v is not None
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat([key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask
        if key_padding_mask is not None:
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf'))
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        attn_weights: 'Optional[Tensor]' = None
        if need_weights:
            attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
            if not need_head_weights:
                attn_weights = attn_weights.mean(dim=0)
        return attn, attn_weights

    @staticmethod
    def _append_prev_key_padding_mask(key_padding_mask: 'Optional[Tensor]', prev_key_padding_mask: 'Optional[Tensor]', batch_size: 'int', src_len: 'int', static_kv: 'bool') ->Optional[Tensor]:
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), key_padding_mask.float()], dim=1)
        elif prev_key_padding_mask is not None:
            filler = torch.zeros((batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device)
            new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), filler.float()], dim=1)
        elif key_padding_mask is not None:
            filler = torch.zeros((batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device)
            new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)
        else:
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask

    @torch.jit.export
    def reorder_incremental_state(self, incremental_state: 'Dict[str, Dict[str, Optional[Tensor]]]', new_order: 'Tensor'):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state

    def _get_input_buffer(self, incremental_state: 'Optional[Dict[str, Dict[str, Optional[Tensor]]]]') ->Dict[str, Optional[Tensor]]:
        result = self.get_incremental_state(incremental_state, 'attn_state')
        if result is not None:
            return result
        else:
            empty_result: 'Dict[str, Optional[Tensor]]' = {}
            return empty_result

    def _set_input_buffer(self, incremental_state: 'Dict[str, Dict[str, Optional[Tensor]]]', buffer: 'Dict[str, Optional[Tensor]]'):
        return self.set_incremental_state(incremental_state, 'attn_state', buffer)

    def apply_sparse_mask(attn_weights, tgt_len: 'int', src_len: 'int', bsz: 'int'):
        return attn_weights

    def upgrade_state_dict_named(self, state_dict, name):
        prefix = name + '.' if name != '' else ''
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + 'in_proj_weight'):
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim]
                items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim:2 * dim]
                items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2 * dim:]
                keys_to_remove.append(k)
                k_bias = prefix + 'in_proj_bias'
                if k_bias in state_dict.keys():
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][:dim]
                    items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][dim:2 * dim]
                    items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][2 * dim:]
                    keys_to_remove.append(prefix + 'in_proj_bias')
        for k in keys_to_remove:
            del state_dict[k]
        for key, value in items_to_add.items():
            state_dict[key] = value


class TransformerLayerNew(nn.Module):
    """Transformer layer block."""

    def __init__(self, embed_dim, ffn_embed_dim, attention_heads, add_bias_kv=True, use_esm1b_layer_norm=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.ffn_embed_dim = ffn_embed_dim
        self.attention_heads = attention_heads
        self._init_submodules(add_bias_kv, use_esm1b_layer_norm)

    def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
        BertLayerNorm = (ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm)
        self.self_attn = MultiheadAttention(self.embed_dim, self.attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False)
        self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
        self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = BertLayerNorm(self.embed_dim)

    def forward(self, input_0):
        primals_7 = self.self_attn.bias_k
        primals_8 = self.self_attn.bias_v
        primals_9 = self.self_attn.k_proj.weight
        primals_2 = self.self_attn.k_proj.bias
        primals_11 = self.self_attn.v_proj.weight
        primals_3 = self.self_attn.v_proj.bias
        primals_12 = self.self_attn.q_proj.weight
        primals_4 = self.self_attn.q_proj.bias
        primals_13 = self.self_attn.out_proj.weight
        primals_5 = self.self_attn.out_proj.bias
        primals_6 = self.self_attn_layer_norm.weight
        primals_10 = self.self_attn_layer_norm.bias
        primals_16 = self.fc1.weight
        primals_14 = self.fc1.bias
        primals_18 = self.fc2.weight
        primals_15 = self.fc2.bias
        primals_17 = self.final_layer_norm.weight
        primals_19 = self.final_layer_norm.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
        return output[0], output[1]
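A minimal smoke test for the fused layer above (editorial sketch, not part of the original record). It assumes the generated code is saved as transformer_layer_triton.py (hypothetical file name), that the record's surrounding imports are present, and that a CUDA device is available:

import torch
from transformer_layer_triton import TransformerLayerNew  # hypothetical module name

# Shapes follow the assert_size_stride guards in call(): input is (4, 4, 4).
layer = TransformerLayerNew(embed_dim=4, ffn_embed_dim=4, attention_heads=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
out, attn = layer(x)
# bias_k/bias_v add one key slot, so averaged attention has src_len 5.
print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 5])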
boxiangliu/esm
TransformerLayer
false
1634
[ "MIT" ]
0
3c143d99103e0ea38a9455f30a73cd9c87376606
https://github.com/boxiangliu/esm/tree/3c143d99103e0ea38a9455f30a73cd9c87376606
my_Hingeloss
import torch
import torch.nn as nn


class my_Hingeloss(nn.Module):

    def __init__(self):
        super(my_Hingeloss, self).__init__()

    def forward(self, output, target):
        pos = torch.sum(output * target, 2)
        neg = torch.max((1 - target) * output, 2)
        loss = neg[0] - pos + 1
        loss[loss < 0] = 0
        loss = torch.mean(loss)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
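A quick usage sketch for the eager module above (editorial addition, assuming the class definition is in scope); shapes follow get_inputs():

import torch

loss_fn = my_Hingeloss()
output = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
# Scalar tensor; per-element hinge terms are clamped at zero before the mean.
print(loss_fn(output, target))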
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 4
    r1 = rindex // 4
    tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None)
    tmp3 = tl.load(in_ptr1 + (r0 + 16 * r1), None)
    tmp5 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None)
    tmp7 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), None)
    tmp10 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None)
    tmp12 = tl.load(in_ptr1 + (8 + r0 + 16 * r1), None)
    tmp15 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None)
    tmp17 = tl.load(in_ptr1 + (12 + r0 + 16 * r1), None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp6 = tmp1 - tmp5
    tmp8 = tmp6 * tmp7
    tmp9 = triton_helpers.maximum(tmp4, tmp8)
    tmp11 = tmp1 - tmp10
    tmp13 = tmp11 * tmp12
    tmp14 = triton_helpers.maximum(tmp9, tmp13)
    tmp16 = tmp1 - tmp15
    tmp18 = tmp16 * tmp17
    tmp19 = triton_helpers.maximum(tmp14, tmp18)
    tmp20 = tmp3 * tmp0
    tmp21 = tmp7 * tmp5
    tmp22 = tmp20 + tmp21
    tmp23 = tmp12 * tmp10
    tmp24 = tmp22 + tmp23
    tmp25 = tmp17 * tmp15
    tmp26 = tmp24 + tmp25
    tmp27 = tmp19 - tmp26
    tmp28 = tmp27 + tmp1
    tmp29 = 0.0
    tmp30 = tmp28 < tmp29
    tmp31 = tl.where(tmp30, tmp29, tmp28)
    tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
    tmp34 = tl.sum(tmp32, 1)[:, None]
    tmp35 = 64.0
    tmp36 = tmp34 / tmp35
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0[grid(1)](buf3, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf3,


class my_HingelossNew(nn.Module):

    def __init__(self):
        super(my_HingelossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
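A hedged parity sketch for the fused version (editorial addition): on a CUDA device, the kernel above should reproduce the eager loss to floating-point tolerance. Both classes are assumed to be in scope.

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')  # plays the role of `output`
b = torch.rand(4, 4, 4, 4, device='cuda')  # plays the role of `target`
eager = my_Hingeloss()(a, b)
fused = my_HingelossNew()(a, b)
# Reduction order differs between the two paths, so compare with a tolerance.
assert torch.allclose(eager, fused, atol=1e-6)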
carsault/chord_sequence_prediction
my_Hingeloss
false
1635
[ "MIT" ]
0
6eb539a963ca6350bcf0c88b8d8756775ad7c488
https://github.com/carsault/chord_sequence_prediction/tree/6eb539a963ca6350bcf0c88b8d8756775ad7c488
SigmoidCrossEntropyLoss
import torch
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn


class SigmoidCrossEntropyLoss(nn.Module):

    def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None, **kwargs):
        """
        Params:
            class_weights: List or 1D tensor of length equal to number of classes.
        """
        super().__init__()
        if class_weights:
            self.loss_fn = nn.BCEWithLogitsLoss(reduction='none', pos_weight=torch.Tensor(class_weights))
        else:
            self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, preds: 'Tensor', target: 'Tensor') ->Tensor:
        if preds.ndim != 2:
            raise RuntimeError(
                'SigmoidCrossEntropyLoss currently supported for 2D tensors.')
        element_loss = self.loss_fn(preds.type(torch.float32), target.type(torch.float32))
        loss = torch.sum(element_loss, dim=1)
        loss = torch.mean(loss)
        return loss


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
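Usage sketch (editorial addition, assuming the class above is in scope): logits and targets are both 2-D, and integer targets are cast to float32 inside forward.

import torch

loss_fn = SigmoidCrossEntropyLoss()
logits = torch.randn(4, 4)
targets = torch.randint(0, 2, (4, 4))  # cast to float32 by forward()
print(loss_fn(logits, targets))  # mean over rows of the per-row BCE sums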
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp14 = tmp1 - tmp13
    tmp16 = tmp14 * tmp15
    tmp17 = triton_helpers.minimum(tmp5, tmp15)
    tmp18 = tl_math.abs(tmp15)
    tmp19 = -tmp18
    tmp20 = tl_math.exp(tmp19)
    tmp21 = libdevice.log1p(tmp20)
    tmp22 = tmp17 - tmp21
    tmp23 = tmp16 - tmp22
    tmp24 = tmp12 + tmp23
    tmp26 = tmp1 - tmp25
    tmp28 = tmp26 * tmp27
    tmp29 = triton_helpers.minimum(tmp5, tmp27)
    tmp30 = tl_math.abs(tmp27)
    tmp31 = -tmp30
    tmp32 = tl_math.exp(tmp31)
    tmp33 = libdevice.log1p(tmp32)
    tmp34 = tmp29 - tmp33
    tmp35 = tmp28 - tmp34
    tmp36 = tmp24 + tmp35
    tmp38 = tmp1 - tmp37
    tmp40 = tmp38 * tmp39
    tmp41 = triton_helpers.minimum(tmp5, tmp39)
    tmp42 = tl_math.abs(tmp39)
    tmp43 = -tmp42
    tmp44 = tl_math.exp(tmp43)
    tmp45 = libdevice.log1p(tmp44)
    tmp46 = tmp41 - tmp45
    tmp47 = tmp40 - tmp46
    tmp48 = tmp36 + tmp47
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp51 = tl.sum(tmp49, 1)[:, None]
    tmp52 = 4.0
    tmp53 = tmp51 / tmp52
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0[grid(1)](buf2, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class SigmoidCrossEntropyLossNew(nn.Module):

    def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None, **kwargs):
        """
        Params:
            class_weights: List or 1D tensor of length equal to number of classes.
        """
        super().__init__()
        if class_weights:
            self.loss_fn = nn.BCEWithLogitsLoss(reduction='none', pos_weight=torch.Tensor(class_weights))
        else:
            self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
carlogrisetti/ludwig
SigmoidCrossEntropyLoss
false
1636
[ "Apache-2.0" ]
0
5c0887f14867e1577e0ddc3806c5cf7a781fb665
https://github.com/carlogrisetti/ludwig/tree/5c0887f14867e1577e0ddc3806c5cf7a781fb665
Explorer
import torch
import numpy as np
import torch.nn as nn


def init(module, weight_init, bias_init, gain=1):
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module


class Explorer(nn.Module):

    def __init__(self, state_dim, max_action, exp_regularization):
        super(Explorer, self).__init__()

        def init_(m):
            return init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))
        self.l1 = init_(nn.Linear(state_dim, 64))
        self.l2 = init_(nn.Linear(64, 64))
        self.l3 = init_(nn.Linear(64, state_dim))
        self.max_action = max_action
        self.exp_regularization = exp_regularization

    def forward(self, state):
        a = torch.tanh(self.l1(state))
        a = torch.tanh(self.l2(a))
        return self.max_action * torch.tanh(self.l3(a)) * self.exp_regularization ** 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'max_action': 4, 'exp_regularization': 4}]
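Illustrative sketch (editorial addition): the output is max_action * tanh(...) * exp_regularization ** 2, so its magnitude stays strictly below max_action * exp_regularization ** 2.

import torch

explorer = Explorer(state_dim=4, max_action=4, exp_regularization=4)
state = torch.rand(4, 4, 4, 4)
perturbation = explorer(state)
# tanh is bounded in (-1, 1), so the bound 4 * 4 ** 2 = 64 holds strictly.
print(perturbation.abs().max().item() < 4 * 4 ** 2)  # True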
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)


@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 4.0
    tmp3 = tmp1 * tmp2
    tmp4 = 16.0
    tmp5 = tmp3 * tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 64), (64, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (4, 64), (64, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf2
        triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf4, primals_6, primals_4


def init(module, weight_init, bias_init, gain=1):
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module


class ExplorerNew(nn.Module):

    def __init__(self, state_dim, max_action, exp_regularization):
        super(ExplorerNew, self).__init__()

        def init_(m):
            return init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))
        self.l1 = init_(nn.Linear(state_dim, 64))
        self.l2 = init_(nn.Linear(64, 64))
        self.l3 = init_(nn.Linear(64, state_dim))
        self.max_action = max_action
        self.exp_regularization = exp_regularization

    def forward(self, input_0):
        primals_1 = self.l1.weight
        primals_2 = self.l1.bias
        primals_4 = self.l2.weight
        primals_5 = self.l2.bias
        primals_6 = self.l3.weight
        primals_7 = self.l3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
baturaysaglam/DISCOVER
Explorer
false
1637
[ "MIT" ]
0
423158c84a5935ca5755ccad06ea5fe20fb57d76
https://github.com/baturaysaglam/DISCOVER/tree/423158c84a5935ca5755ccad06ea5fe20fb57d76
ReduceLast
import torch


def sequence_length_3D(sequence: 'torch.Tensor') ->torch.Tensor:
    used = torch.sign(torch.amax(torch.abs(sequence), dim=2))
    length = torch.sum(used, 1)
    length = length.int()
    return length


class ReduceLast(torch.nn.Module):

    def forward(self, inputs, mask=None):
        batch_size = inputs.shape[0]
        sequence_length = sequence_length_3D(inputs) - 1
        sequence_length[sequence_length < 0] = 0
        gathered = inputs[torch.arange(batch_size), sequence_length.type(torch.int64)]
        return gathered


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
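Sketch of the gather semantics (editorial addition; a 3-D input is the intended case): zero-padding a trailing step shrinks the computed length, so ReduceLast picks the last non-zero step of each example.

import torch

x = torch.rand(2, 3, 4)
x[0, 2] = 0.0  # pad the final step of example 0
print(sequence_length_3D(x))  # tensor([2, 3], dtype=torch.int32)
print(ReduceLast()(x).shape)  # torch.Size([2, 4])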
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
    tmp8 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
    tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp20 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp23 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
    tmp26 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
    tmp36 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp38 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp41 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
    tmp44 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
    tmp54 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp56 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp59 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
    tmp62 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
    tmp1 = tl_math.abs(tmp0)
    tmp3 = tl_math.abs(tmp2)
    tmp4 = triton_helpers.maximum(tmp1, tmp3)
    tmp6 = tl_math.abs(tmp5)
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tl_math.abs(tmp8)
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp11 = tl.full([1], 0, tl.int32)
    tmp12 = tmp11 < tmp10
    tmp13 = tmp12.to(tl.int8)
    tmp14 = tmp10 < tmp11
    tmp15 = tmp14.to(tl.int8)
    tmp16 = tmp13 - tmp15
    tmp17 = tmp16.to(tmp10.dtype)
    tmp19 = tl_math.abs(tmp18)
    tmp21 = tl_math.abs(tmp20)
    tmp22 = triton_helpers.maximum(tmp19, tmp21)
    tmp24 = tl_math.abs(tmp23)
    tmp25 = triton_helpers.maximum(tmp22, tmp24)
    tmp27 = tl_math.abs(tmp26)
    tmp28 = triton_helpers.maximum(tmp25, tmp27)
    tmp29 = tmp11 < tmp28
    tmp30 = tmp29.to(tl.int8)
    tmp31 = tmp28 < tmp11
    tmp32 = tmp31.to(tl.int8)
    tmp33 = tmp30 - tmp32
    tmp34 = tmp33.to(tmp28.dtype)
    tmp35 = tmp17 + tmp34
    tmp37 = tl_math.abs(tmp36)
    tmp39 = tl_math.abs(tmp38)
    tmp40 = triton_helpers.maximum(tmp37, tmp39)
    tmp42 = tl_math.abs(tmp41)
    tmp43 = triton_helpers.maximum(tmp40, tmp42)
    tmp45 = tl_math.abs(tmp44)
    tmp46 = triton_helpers.maximum(tmp43, tmp45)
    tmp47 = tmp11 < tmp46
    tmp48 = tmp47.to(tl.int8)
    tmp49 = tmp46 < tmp11
    tmp50 = tmp49.to(tl.int8)
    tmp51 = tmp48 - tmp50
    tmp52 = tmp51.to(tmp46.dtype)
    tmp53 = tmp35 + tmp52
    tmp55 = tl_math.abs(tmp54)
    tmp57 = tl_math.abs(tmp56)
    tmp58 = triton_helpers.maximum(tmp55, tmp57)
    tmp60 = tl_math.abs(tmp59)
    tmp61 = triton_helpers.maximum(tmp58, tmp60)
    tmp63 = tl_math.abs(tmp62)
    tmp64 = triton_helpers.maximum(tmp61, tmp63)
    tmp65 = tmp11 < tmp64
    tmp66 = tmp65.to(tl.int8)
    tmp67 = tmp64 < tmp11
    tmp68 = tmp67.to(tl.int8)
    tmp69 = tmp66 - tmp68
    tmp70 = tmp69.to(tmp64.dtype)
    tmp71 = tmp53 + tmp70
    tmp72 = tmp71.to(tl.int32)
    tmp73 = tl.full([1], 1, tl.int32)
    tmp74 = tmp72 - tmp73
    tmp75 = tmp74 < tmp11
    tmp76 = tl.where(tmp75, tmp11, tmp74)
    tl.store(out_ptr1 + x2, tmp76, xmask)


@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 16
    x0 = xindex % 16
    x1 = xindex // 16 % 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tmp0.to(tl.int64)
    tmp2 = tl.full([XBLOCK], 4, tl.int32)
    tmp3 = tmp1 + tmp2
    tmp4 = tmp1 < 0
    tmp5 = tl.where(tmp4, tmp3, tmp1)
    tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask, 'index out of bounds: 0 <= tmp5 < 4')
    tmp7 = tl.load(in_ptr1 + (x0 + 16 * tmp5 + 64 * x1), xmask)
    tl.store(out_ptr0 + x4, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int32)
        get_raw_stream(0)
        triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0[grid(16)](arg0_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_index_1[grid(256)](buf1, arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del buf1
    return buf2,


def sequence_length_3D(sequence: 'torch.Tensor') ->torch.Tensor:
    used = torch.sign(torch.amax(torch.abs(sequence), dim=2))
    length = torch.sum(used, 1)
    length = length.int()
    return length


class ReduceLastNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
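Hedged parity sketch for the fused gather above (editorial addition; CUDA required, both classes assumed in scope). Since both paths gather the same input values, the results should agree, but allclose keeps the check robust to index-computation differences.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ReduceLast()(x), ReduceLastNew()(x))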
carlogrisetti/ludwig
ReduceLast
false
1638
[ "Apache-2.0" ]
0
5c0887f14867e1577e0ddc3806c5cf7a781fb665
https://github.com/carlogrisetti/ludwig/tree/5c0887f14867e1577e0ddc3806c5cf7a781fb665
Block
import torch
import torch.nn as nn
import torch.nn.functional as F


class RemoveChannelMeanStd(torch.nn.Module):

    def forward(self, x):
        x2 = x.view(x.size(0), x.size(1), -1)
        mean = x2.mean(dim=2).view(x.size(0), x.size(1), 1, 1)
        std = x2.std(dim=2).view(x.size(0), x.size(1), 1, 1)
        return (x - mean) / std


class Block(nn.Module):

    def __init__(self, in_planes, planes, stride=1, groups=False):
        super(Block, self).__init__()
        self.bn1 = RemoveChannelMeanStd()
        if not groups:
            self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=True)
        else:
            self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True, groups=min(in_planes, planes))
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=True, groups=planes)
        self.bn2 = RemoveChannelMeanStd()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True), RemoveChannelMeanStd())

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        relu1 = F.relu(out)
        out = self.conv2(relu1)
        out = self.bn2(out)
        shortcut = self.shortcut(x) if hasattr(self, 'shortcut') else x
        out += shortcut
        out = F.relu(out)
        return relu1, out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_planes': 4, 'planes': 4}]
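Shape sketch (editorial addition): with stride 1 and in_planes == planes, no shortcut branch is registered, and both returned tensors keep the input's shape.

import torch

block = Block(in_planes=4, planes=4)
x = torch.rand(4, 4, 4, 4)
relu1, out = block(x)  # identity shortcut is used here
print(relu1.shape, out.shape)  # torch.Size([4, 4, 4, 4]) twice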
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_convolution_div_mean_relu_std_sub_0(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp8 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp10 = tl.where(xmask, tmp8, 0)
    tmp11 = tl.sum(tmp10, 1)[:, None]
    tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp13 = tmp12.to(tl.float32)
    tmp14 = tmp11 / tmp13
    tmp15 = tmp3 - tmp14
    tmp16 = tmp15 * tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.where(xmask, tmp17, 0)
    tmp20 = tl.sum(tmp19, 1)[:, None]
    tmp21 = 16.0
    tmp22 = tmp6 / tmp21
    tmp23 = 15.0
    tmp24 = tmp20 / tmp23
    tmp25 = libdevice.sqrt(tmp24)
    tmp26 = tmp2 - tmp22
    tmp27 = tmp26 / tmp25
    tmp28 = tl.full([1, 1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp22, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr2 + x3, tmp25, xmask)
    tl.store(out_ptr0 + (r2 + 16 * x3), tmp29, xmask)


@triton.jit
def triton_per_fused_add_convolution_div_mean_relu_std_sub_threshold_backward_1(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp8 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp10 = tl.where(xmask, tmp8, 0)
    tmp11 = tl.sum(tmp10, 1)[:, None]
    tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp13 = tmp12.to(tl.float32)
    tmp14 = tmp11 / tmp13
    tmp15 = tmp3 - tmp14
    tmp16 = tmp15 * tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.where(xmask, tmp17, 0)
    tmp20 = tl.sum(tmp19, 1)[:, None]
    tmp21 = 16.0
    tmp22 = tmp6 / tmp21
    tmp23 = 15.0
    tmp24 = tmp20 / tmp23
    tmp25 = libdevice.sqrt(tmp24)
    tmp26 = tmp2 - tmp22
    tmp27 = tmp26 / tmp25
    tmp29 = tmp27 + tmp28
    tmp30 = tl.full([1, 1], 0, tl.int32)
    tmp31 = triton_helpers.maximum(tmp30, tmp29)
    tmp32 = 0.0
    tmp33 = tmp31 <= tmp32
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp22, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr2 + x3, tmp25, xmask)
    tl.store(out_ptr0 + (r2 + 16 * x3), tmp31, xmask)
    tl.store(out_ptr1 + (r2 + 16 * x3), tmp33, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf3 = buf2
        del buf2
        buf7 = buf5
        del buf5
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_convolution_div_mean_relu_std_sub_0[grid(16)](buf1, buf3, buf7, primals_2, buf8, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_2
        buf9 = extern_kernels.convolution(buf8, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
        buf10 = buf9
        del buf9
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf12 = buf11
        del buf11
        buf16 = buf14
        del buf14
        buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_per_fused_add_convolution_div_mean_relu_std_sub_threshold_backward_1[grid(16)](buf10, buf12, buf16, primals_5, primals_3, buf17, buf18, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del primals_5
    return (buf8, buf17, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf7, buf8, buf10, reinterpret_tensor(buf12, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf16, buf18)


class RemoveChannelMeanStd(torch.nn.Module):

    def forward(self, x):
        x2 = x.view(x.size(0), x.size(1), -1)
        mean = x2.mean(dim=2).view(x.size(0), x.size(1), 1, 1)
        std = x2.std(dim=2).view(x.size(0), x.size(1), 1, 1)
        return (x - mean) / std


class BlockNew(nn.Module):

    def __init__(self, in_planes, planes, stride=1, groups=False):
        super(BlockNew, self).__init__()
        self.bn1 = RemoveChannelMeanStd()
        if not groups:
            self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=True)
        else:
            self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True, groups=min(in_planes, planes))
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=True, groups=planes)
        self.bn2 = RemoveChannelMeanStd()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True), RemoveChannelMeanStd())

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0], output[1]
cadurosar/graph_kd_dense_cifar100
Block
false
1639
[ "MIT" ]
0
84054ab4f8f61c9db3460993661ba7bf1d951b36
https://github.com/cadurosar/graph_kd_dense_cifar100/tree/84054ab4f8f61c9db3460993661ba7bf1d951b36
NPairsLoss
import torch
from torch import nn


class NPairsLoss(nn.Module):

    def __init__(self, name):
        super(NPairsLoss, self).__init__()
        self.name = name

    def forward(self, r1, r2):
        """
        Computes the N-Pairs Loss between the r1 and r2 representations.
        :param r1: Tensor of shape (batch_size, representation_size)
        :param r2: Tensor of shape (batch_size, representation_size)
        :return: the scalar loss
        """
        scores = torch.matmul(r1, r2.t())
        diagonal_mean = torch.mean(torch.diag(scores))
        mean_log_row_sum_exp = torch.mean(torch.logsumexp(scores, dim=1))
        return -diagonal_mean + mean_log_row_sum_exp


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'name': 4}]
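Sanity sketch (editorial addition): since the logsumexp of a row is at least its diagonal entry, the loss is non-negative; L2-normalizing keeps the scores bounded.

import torch
import torch.nn.functional as F

loss_fn = NPairsLoss(name='npairs')
r = F.normalize(torch.randn(4, 4), dim=1)
print(loss_fn(r, r))  # >= 0 by the logsumexp >= max >= diagonal argument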
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_diagonal_copy_logsumexp_mean_neg_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp10 = triton_helpers.maximum(tmp8, tmp9)
    tmp11 = tl_math.abs(tmp10)
    tmp12 = float('inf')
    tmp13 = tmp11 == tmp12
    tmp14 = 0.0
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = tmp4 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp5 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp9 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp27 + tmp15
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
    tmp31 = tl.sum(tmp29, 1)[:, None]
    tmp32 = 4.0
    tmp33 = tmp3 / tmp32
    tmp34 = -tmp33
    tmp35 = tmp31 / tmp32
    tmp36 = tmp34 + tmp35
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_diagonal_copy_logsumexp_mean_neg_0[grid(1)](buf3, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
    return buf3,


class NPairsLossNew(nn.Module):

    def __init__(self, name):
        super(NPairsLossNew, self).__init__()
        self.name = name

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
celsofranssa/PAWS
NPairsLoss
false
1,640
[ "MIT" ]
0
3171c8a22990059f5d4c0e7e81cc0299a716efb2
https://github.com/celsofranssa/PAWS/tree/3171c8a22990059f5d4c0e7e81cc0299a716efb2
CNNCifar
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.functional as F


class CNNCifar(nn.Module):

    def __init__(self, args):
        super(CNNCifar, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {'args': _mock_config(num_classes=4)}]
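Instantiation sketch (editorial addition): _mock_config comes from the benchmark harness; any object exposing a num_classes attribute works the same way, e.g. a SimpleNamespace (an assumption, not from the record).

import torch
from types import SimpleNamespace

model = CNNCifar(SimpleNamespace(num_classes=4))  # stand-in for _mock_config
logits = model(torch.rand(4, 3, 32, 32))
print(logits.shape)  # torch.Size([4, 4]); rows are log-probabilities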
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 18816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 6
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x3 = xindex // 14
    x2 = xindex // 1176
    x4 = xindex % 1176
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 480
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 120
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 336
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 84
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__log_softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (120, 400), (400, 1))
    assert_size_stride(primals_7, (120,), (1,))
    assert_size_stride(primals_8, (84, 120), (120, 1))
    assert_size_stride(primals_9, (84,), (1,))
    assert_size_stride(primals_10, (4, 84), (84, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
        extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10)
        buf11 = buf10
        del buf10
        triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_9
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 4),
(1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_6[grid(16)](buf12, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf14 = buf12 del buf12 triton_poi_fused__log_softmax_7[grid(16)](buf13, buf14, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf13 return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, buf14, primals_10, primals_8, primals_6) class CNNCifarNew(nn.Module): def __init__(self, args): super(CNNCifarNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, args.num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Joey61Liuyi/Federated-Learning-PyTorch
CNNCifar
false
1,641
[ "MIT" ]
0
e95f096b18c5a1bf30fc0485acd5a15c84327f2e
https://github.com/Joey61Liuyi/Federated-Learning-PyTorch/tree/e95f096b18c5a1bf30fc0485acd5a15c84327f2e
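A minimal parity sketch for the pair above, assuming a CUDA device and with both classes from this record in scope; `types.SimpleNamespace` is a hypothetical stand-in for the external `_mock_config` helper. The compiled wrapper reuses the eager module's submodule names, so a `state_dict` copy lines the two up:

import torch
from types import SimpleNamespace

args = SimpleNamespace(num_classes=4)   # stand-in for _mock_config
ref = CNNCifar(args).cuda().eval()
opt = CNNCifarNew(args).cuda().eval()
opt.load_state_dict(ref.state_dict())   # identical weights in both modules

x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    # both return (4, num_classes) log-probabilities; agreement is up to
    # float reordering inside the fused kernels
    assert torch.allclose(ref(x), opt(x), atol=1e-5)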
RMSELoss
import torch
import torch.nn as nn


class RMSELoss(nn.Module):

    def __init__(self, eps=1e-08):
        super(RMSELoss, self).__init__()
        self.mse = nn.MSELoss()
        self.eps = eps

    def forward(self, y_hat, y):
        return torch.sqrt(self.mse(y_hat, y) + self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1e-08
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.sqrt(tmp10)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class RMSELossNew(nn.Module):

    def __init__(self, eps=1e-08):
        super(RMSELossNew, self).__init__()
        self.mse = nn.MSELoss()
        self.eps = eps

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
cerisara/weibull-knowledge-informed-ml
RMSELoss
false
1,642
[ "MIT" ]
0
19017817f5324fb1fffd8322d2d3567a6271948c
https://github.com/cerisara/weibull-knowledge-informed-ml/tree/19017817f5324fb1fffd8322d2d3567a6271948c
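A quick eager-mode cross-check of the fused reduction above, assuming a CUDA device and the record's classes in scope; note the generated kernel hard-codes rnumel = 256 and the divisor 256.0, so the sketch keeps the (4, 4, 4, 4) shape from get_inputs():

import torch

y_hat = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
eager = torch.sqrt(torch.mean((y_hat - y) ** 2) + 1e-08)  # sqrt(MSE + eps)
fused = RMSELossNew()(y_hat, y)
assert torch.allclose(eager, fused, atol=1e-6)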
MAPELoss
import torch
import torch.nn as nn


class MAPELoss(nn.Module):

    def __init__(self, eps=1e-08):
        super(MAPELoss, self).__init__()
        self.eps = eps

    def forward(self, y_hat, y):
        return torch.mean(torch.abs(y - y_hat) / torch.abs(y + self.eps)
            ) * 100


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_add_div_mean_mul_sub_0(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1e-08
    tmp5 = tmp0 + tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tmp3 / tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tmp13 = 100.0
    tmp14 = tmp12 * tmp13
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_add_div_mean_mul_sub_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class MAPELossNew(nn.Module):

    def __init__(self, eps=1e-08):
        super(MAPELossNew, self).__init__()
        self.eps = eps

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
cerisara/weibull-knowledge-informed-ml
MAPELoss
false
1,643
[ "MIT" ]
0
19017817f5324fb1fffd8322d2d3567a6271948c
https://github.com/cerisara/weibull-knowledge-informed-ml/tree/19017817f5324fb1fffd8322d2d3567a6271948c
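A worked example of the eager definition above (mean absolute percentage error, scaled to percent), with values chosen so the arithmetic is easy to follow:

import torch

loss = MAPELoss()
y = torch.tensor([1.0, 2.0, 4.0])
y_hat = torch.tensor([1.1, 1.8, 5.0])
# per-element |y - y_hat| / |y + eps|: 0.1, 0.1, 0.25 -> mean 0.15 -> * 100
print(loss(y_hat, y))  # tensor(15.0000), up to the 1e-8 epsilon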
MY_NGM_FFNN
import random
import torch
import torch.nn as nn
from collections import defaultdict
import torch.nn.functional as F
import torch.optim as optim


class MY_NGM_FFNN(nn.Module):

    def __init__(self, alpha, input_dim, hidden1_dim, hidden2_dim,
            output_dim, device=torch.device('cpu')):
        super(MY_NGM_FFNN, self).__init__()
        self.hidden1 = nn.Linear(input_dim, hidden1_dim)
        self.hidden2 = nn.Linear(hidden1_dim, hidden2_dim)
        self.output = nn.Linear(hidden2_dim, output_dim)
        self.alpha = alpha
        self.device = device

    def save(self, output_dir, model_name):
        None
        torch.save(self.state_dict(), output_dir + model_name + '.pt')
        None

    def load(self, output_dir, model_name):
        None
        self.load_state_dict(torch.load(output_dir + model_name + '.pt'))
        None

    def forward(self, tf_idf_vec):
        hidden1 = F.relu(self.hidden1(tf_idf_vec))
        hidden2 = F.relu(self.hidden2(hidden1))
        return F.log_softmax(self.output(hidden2), -1)

    def reset_parameters(self):
        self.hidden1.reset_parameters()
        self.hidden2.reset_parameters()
        self.output.reset_parameters()

    def get_last_hidden(self, tf_idf_vec):
        hidden1 = F.relu(self.hidden1(tf_idf_vec))
        return F.relu(self.hidden2(hidden1))

    def train_(self, seed_nodes, train_node_pairs, node2vec, node2label,
            num_epoch, batch_size, learning_rate):
        None
        self.train()
        loss_function = nn.NLLLoss()
        optimizer = optim.SGD(self.parameters(), lr=learning_rate)
        node2neighbors = defaultdict(list)
        for src, dest in train_node_pairs:
            node2neighbors[src].append(dest)
            node2neighbors[dest].append(src)
        labeled_nodes = dict()
        for node in seed_nodes:
            labeled_nodes[node] = node2label[node]
        iteration = 1
        while iteration < 2:
            None
            None
            iteration += 1
            for e in range(num_epoch):
                train_node_pairs_cpy = train_node_pairs[:]
                total_loss = 0
                count = 0
                while train_node_pairs_cpy:
                    optimizer.zero_grad()
                    loss = torch.tensor(0, dtype=torch.float32,
                        device=self.device)
                    try:
                        batch = random.sample(train_node_pairs_cpy, batch_size)
                    except ValueError:
                        break
                    for src, dest in batch:
                        count += 1
                        train_node_pairs_cpy.remove((src, dest))
                        src_vec = torch.tensor(node2vec[src])
                        dest_vec = torch.tensor(node2vec[dest])
                        if src in labeled_nodes:
                            src_target = torch.tensor([labeled_nodes[src]])
                            src_softmax = self.forward(torch.tensor(src_vec))
                            src_incident_edges = len(node2neighbors[src])
                            loss += loss_function(src_softmax.view(1, -1),
                                src_target) * (1 / src_incident_edges)
                        if dest in labeled_nodes:
                            dest_target = torch.tensor([labeled_nodes[dest]])
                            dest_softmax = self.forward(torch.tensor(dest_vec))
                            dest_incident_edges = len(node2neighbors[dest])
                            loss += loss_function(dest_softmax.view(1, -1),
                                dest_target) * (1 / dest_incident_edges)
                        loss += self.alpha * torch.dist(self.
                            get_last_hidden(src_vec), self.get_last_hidden(
                            dest_vec))
                    if loss.item() != 0:
                        assert not torch.isnan(loss)
                        loss.backward()
                        optimizer.step()
                        total_loss += loss.item()
                    del loss
                total_loss / len(labeled_nodes)
                None
            for node in list(labeled_nodes.keys()):
                label = labeled_nodes[node]
                for neighbor in node2neighbors[node]:
                    if neighbor not in labeled_nodes:
                        labeled_nodes[neighbor] = label

    def predict(self, tf_idf_vec):
        return torch.argmax(self.forward(tf_idf_vec)).item()

    def evaluate(self, test_nodes, node2vec, node2label):
        self.eval()
        None
        correct_count = 0
        for node in test_nodes:
            predicted = self.predict(torch.tensor(node2vec[node],
                device=self.device))
            None
            if predicted == node2label[node]:
                correct_count += 1
        return float(correct_count) / len(test_nodes)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'alpha': 4, 'input_dim': 4, 'hidden1_dim': 4,
        'hidden2_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import random import torch.nn as nn from collections import defaultdict import torch.nn.functional as F import torch.optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf4, buf5, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__log_softmax_2[grid(256)](buf5, buf6, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class MY_NGM_FFNNNew(nn.Module): def __init__(self, alpha, input_dim, hidden1_dim, hidden2_dim, output_dim, device=torch.device('cpu')): super(MY_NGM_FFNNNew, self).__init__() self.hidden1 = nn.Linear(input_dim, hidden1_dim) self.hidden2 = nn.Linear(hidden1_dim, hidden2_dim) self.output = nn.Linear(hidden2_dim, output_dim) self.alpha = alpha self.device = device self def save(self, output_dir, model_name): None torch.save(self.state_dict(), output_dir + model_name + '.pt') None def load(self, output_dir, model_name): None self.load_state_dict(torch.load(output_dir + model_name + 
'.pt')) None def reset_parameters(self): self.hidden1.reset_parameters() self.hidden2.reset_parameters() self.output.reset_parameters() def get_last_hidden(self, tf_idf_vec): hidden1 = F.relu(self.hidden1(tf_idf_vec)) return F.relu(self.hidden2(hidden1)) def train_(self, seed_nodes, train_node_pairs, node2vec, node2label, num_epoch, batch_size, learning_rate): None self.train() loss_function = nn.NLLLoss() optimizer = optim.SGD(self.parameters(), lr=learning_rate) node2neighbors = defaultdict(list) for src, dest in train_node_pairs: node2neighbors[src].append(dest) node2neighbors[dest].append(src) labeled_nodes = dict() for node in seed_nodes: labeled_nodes[node] = node2label[node] iteration = 1 while iteration < 2: None None iteration += 1 for e in range(NUM_EPOCH): train_node_pairs_cpy = train_node_pairs[:] total_loss = 0 count = 0 while train_node_pairs_cpy: optimizer.zero_grad() loss = torch.tensor(0, dtype=torch.float32, device=self .device) try: batch = random.sample(train_node_pairs_cpy, batch_size) except ValueError: break for src, dest in batch: count += 1 train_node_pairs_cpy.remove((src, dest)) src_vec = torch.tensor(node2vec[src]) dest_vec = torch.tensor(node2vec[dest]) if src in labeled_nodes: src_target = torch.tensor([labeled_nodes[src]]) src_softmax = self.forward(torch.tensor(src_vec)) src_incident_edges = len(node2neighbors[src]) loss += loss_function(src_softmax.view(1, -1), src_target) * (1 / src_incident_edges) if dest in labeled_nodes: dest_target = torch.tensor([labeled_nodes[dest]]) dest_softmax = self.forward(torch.tensor(dest_vec)) dest_incident_edges = len(node2neighbors[dest]) loss += loss_function(dest_softmax.view(1, -1), dest_target) * (1 / dest_incident_edges) loss += self.alpha * torch.dist(self. get_last_hidden(src_vec), self.get_last_hidden( dest_vec)) if loss.item() != 0: assert not torch.isnan(loss) loss.backward() optimizer.step() total_loss += loss.item() del loss total_loss / len(labeled_nodes) None for node in list(labeled_nodes.keys()): label = labeled_nodes[node] for neighbor in node2neighbors[node]: if neighbor not in labeled_nodes: labeled_nodes[neighbor] = label def predict(self, tf_idf_vec): return torch.argmax(self.forward(tf_idf_vec)).item() def evaluate(self, test_nodes, node2vec, node2label): self.eval() None correct_count = 0 for node in test_nodes: predicted = self.predict(torch.tensor(node2vec[node], device= self.device)) None if predicted == node2label[node]: correct_count += 1 return float(correct_count) / len(test_nodes) def forward(self, input_0): primals_1 = self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.output.weight primals_7 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
calpoly-bioinf/knowledge_driven_modeling
MY_NGM_FFNN
false
1,644
[ "MIT" ]
0
dbe55d5bb07f7c5a1834a21fde8833f295e3ac96
https://github.com/calpoly-bioinf/knowledge_driven_modeling/tree/dbe55d5bb07f7c5a1834a21fde8833f295e3ac96
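A small CPU sketch of the neural-tangent-kernel helper defined in this record; shapes follow the constructor arguments, and the resulting kernel matrix is (n_samples * output_dim) square:

import torch

net = MY_NGM_FFNN(alpha=4, input_dim=4, hidden1_dim=4, hidden2_dim=4,
                  output_dim=4)
x = torch.rand(3, 4)            # 3 nodes, 4 tf-idf features each
K = net.compute_neural_tangent_kernel(x)
print(K.shape)                  # torch.Size([12, 12])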
Discriminator
import torch
import torch.nn as nn
import torch.nn.functional as F


class Discriminator(nn.Module):

    def __init__(self, n_layersDecod, hidden_size, output_size=2):
        super(Discriminator, self).__init__()
        self.map1 = nn.Linear(n_layersDecod * hidden_size, hidden_size)
        self.map2 = nn.Linear(hidden_size, hidden_size)
        self.map3 = nn.Linear(hidden_size, output_size)
        self.n_layersDecod = n_layersDecod
        self.hidden_size = hidden_size

    def forward(self, x):
        x = x.view(-1, self.n_layersDecod * self.hidden_size)
        x = F.relu(self.map1(x))
        x = F.relu(self.map2(x))
        return nn.Softmax()(self.map3(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_layersDecod': 1, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (2, 4), (4, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf4 return buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_6, primals_4 class DiscriminatorNew(nn.Module): def __init__(self, n_layersDecod, hidden_size, output_size=2): super(DiscriminatorNew, self).__init__() self.map1 = nn.Linear(n_layersDecod * hidden_size, hidden_size) self.map2 = nn.Linear(hidden_size, hidden_size) 
self.map3 = nn.Linear(hidden_size, output_size) self.n_layersDecod = n_layersDecod self.hidden_size = hidden_size def forward(self, input_0): primals_2 = self.map1.weight primals_3 = self.map1.bias primals_4 = self.map2.weight primals_5 = self.map2.bias primals_6 = self.map3.weight primals_7 = self.map3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
carsault/chord_sequence_prediction
Discriminator
false
1,645
[ "MIT" ]
0
6eb539a963ca6350bcf0c88b8d8756775ad7c488
https://github.com/carsault/chord_sequence_prediction/tree/6eb539a963ca6350bcf0c88b8d8756775ad7c488
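A shape walk-through for the compiled pair above, assuming a CUDA device: view(-1, n_layersDecod * hidden_size) flattens the (4, 4, 4, 4) test input into a (64, 4) batch before map1:

import torch

d = DiscriminatorNew(n_layersDecod=1, hidden_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
p = d(x)
print(p.shape)           # torch.Size([64, 2])
print(p.sum(dim=1)[:3])  # softmax rows each sum to ~1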
NeuralNet
import torch


class NeuralNet(torch.nn.Module):

    def __init__(self, input_size, output_size, hidden_size, lr=0.0001):
        super(NeuralNet, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.HiddenLayer = torch.nn.Linear(self.input_size, self.
            hidden_size, bias=True).double()
        self.DropoutLayer = torch.nn.Dropout(p=0.0)
        self.OutputLayer = torch.nn.Linear(self.hidden_size, self.
            output_size, bias=True).double()
        self.neural_tangent_kernel = None
        self.learning_rate = lr
        self.optimizer = torch.optim.SGD(self.parameters(), lr=self.
            learning_rate)
        self.loss_arr = []
        self.__policy_loss = []

    def forward(self, x):
        x = x.double()
        x = self.HiddenLayer(x)
        x = self.DropoutLayer(x)
        x = torch.relu(x)
        x = self.OutputLayer(x)
        x = torch.softmax(x, dim=1)
        return x

    def __partial_derivatives(self, x):
        self.zero_grad()
        w1 = torch.empty(self.output_size, self.hidden_size * self.
            input_size + self.hidden_size, dtype=torch.float64)
        w2 = torch.empty(self.output_size, self.hidden_size * self.
            output_size + self.output_size, dtype=torch.float64)
        for i in range(self.output_size):
            y = self.forward(x)
            y = y[0][i]
            y.backward()
            wi1 = self.HiddenLayer.weight.grad
            wi1 = torch.reshape(wi1, [wi1.shape[0] * wi1.shape[1], 1])
            wi1 = torch.cat([wi1, self.HiddenLayer.bias.grad.unsqueeze(1)])
            wi2 = self.OutputLayer.weight.grad
            wi2 = torch.reshape(wi2, [wi2.shape[0] * wi2.shape[1], 1])
            wi2 = torch.cat([wi2, self.OutputLayer.bias.grad.unsqueeze(1)])
            wi1g = wi1.clone().detach()
            wi2g = wi2.clone().detach()
            w1[i] = wi1g.squeeze()
            w2[i] = wi2g.squeeze()
        self.zero_grad()
        return w1, w2

    def compute_neural_tangent_kernel(self, x):
        kernel = torch.zeros([x.shape[0] * self.output_size, x.shape[0] *
            self.output_size], dtype=torch.float64, requires_grad=False)
        i = 0
        for x1 in x.data:
            w1x1, w2x1 = self.__partial_derivatives(x1.unsqueeze(dim=0))
            j = 0
            for x2 in x.data:
                w1x2, w2x2 = self.__partial_derivatives(x2.unsqueeze(dim=0))
                kernel[self.output_size * i:self.output_size * i + self.
                    output_size, self.output_size * j:self.output_size * j +
                    self.output_size] = torch.matmul(w1x1, w1x2.transpose(0, 1)
                    ) + torch.matmul(w2x1, w2x2.transpose(0, 1))
                j += 1
            i += 1
        self.neural_tangent_kernel = kernel
        return kernel

    def train_network(self, log_probs, gains, gains_normed=0):
        self.__policy_loss = []
        eps = 1e-08
        batch_len = len(gains)
        gains.clone().detach().requires_grad_(True)
        if gains_normed:
            gains_norm = gains
        else:
            gains_norm = (gains - gains.mean()) / (gains.std() + eps)
        for k in range(batch_len):
            self.__policy_loss.append(-log_probs[k] * gains_norm[k])
        self.__policy_loss = torch.cat(self.__policy_loss).sum()
        self.__policy_loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        self.loss_arr.append(self.__policy_loss.item())


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.full([1], 0.0, tl.float64) tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = libdevice.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float64) get_raw_stream(0) 
triton_poi_fused__to_copy_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float64) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2, primals_3, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float64) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float64) triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 return buf5, reinterpret_tensor(buf0, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf5, primals_4, buf6 class NeuralNetNew(torch.nn.Module): def __init__(self, input_size, output_size, hidden_size, lr=0.0001): super(NeuralNetNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.HiddenLayer = torch.nn.Linear(self.input_size, self. hidden_size, bias=True).double() self.DropoutLayer = torch.nn.Dropout(p=0.0) self.OutputLayer = torch.nn.Linear(self.hidden_size, self. output_size, bias=True).double() self.neural_tangent_kernel = None self.learning_rate = lr self.optimizer = torch.optim.SGD(self.parameters(), lr=self. learning_rate) self.loss_arr = [] self.__policy_loss = [] def __partial_derivatives(self, x): self.zero_grad() w1 = torch.empty(self.output_size, self.hidden_size * self. input_size + self.hidden_size, dtype=torch.float64) w2 = torch.empty(self.output_size, self.hidden_size * self. output_size + self.output_size, dtype=torch.float64) for i in range(self.output_size): y = self.forward(x) y = y[0][i] y.backward() wi1 = self.HiddenLayer.weight.grad wi1 = torch.reshape(wi1, [wi1.shape[0] * wi1.shape[1], 1]) wi1 = torch.cat([wi1, self.HiddenLayer.bias.grad.unsqueeze(1)]) wi2 = self.OutputLayer.weight.grad wi2 = torch.reshape(wi2, [wi2.shape[0] * wi2.shape[1], 1]) wi2 = torch.cat([wi2, self.OutputLayer.bias.grad.unsqueeze(1)]) wi1g = wi1.clone().detach() wi2g = wi2.clone().detach() w1[i] = wi1g.squeeze() w2[i] = wi2g.squeeze() self.zero_grad() return w1, w2 def compute_neural_tangent_kernel(self, x): kernel = torch.zeros([x.shape[0] * self.output_size, x.shape[0] * self.output_size], dtype=torch.float64, requires_grad=False) i = 0 for x1 in x.data: w1x1, w2x1 = self.__partial_derivatives(x1.unsqueeze(dim=0)) j = 0 for x2 in x.data: w1x2, w2x2 = self.__partial_derivatives(x2.unsqueeze(dim=0)) kernel[self.output_size * i:self.output_size * i + self. 
output_size, self.output_size * j:self.output_size * j + self.output_size] = torch.matmul(w1x1, w1x2.transpose(0, 1) ) + torch.matmul(w2x1, w2x2.transpose(0, 1)) j += 1 i += 1 self.neural_tangent_kernel = kernel return kernel def train_network(self, log_probs, gains, gains_normed=0): self.__policy_loss = [] eps = 1e-08 batch_len = len(gains) gains.clone().detach().requires_grad_(True) if gains_normed: gains_norm = gains else: gains_norm = (gains - gains.mean()) / (gains.std() + eps) for k in range(batch_len): self.__policy_loss.append(-log_probs[k] * gains_norm[k]) self.__policy_loss = torch.cat(self.__policy_loss).sum() self.__policy_loss.backward() self.optimizer.step() self.optimizer.zero_grad() self.loss_arr.append(self.__policy_loss.item()) def forward(self, input_0): primals_2 = self.HiddenLayer.weight primals_3 = self.HiddenLayer.bias primals_4 = self.OutputLayer.weight primals_5 = self.OutputLayer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
bva-bme/Constrained_Policy_Gradient
NeuralNet
false
1,646
[ "MIT" ]
0
2331f55ff3bf06e2276662517c34cc45d5a51da8
https://github.com/bva-bme/Constrained_Policy_Gradient/tree/2331f55ff3bf06e2276662517c34cc45d5a51da8
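The .double() layers in this record force a float64 graph, so the compiled path begins with a _to_copy cast of the float32 input; a quick check of dtype and softmax normalization, assuming CUDA:

import torch

net = NeuralNetNew(input_size=4, output_size=4, hidden_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')      # float32 in
y = net(x)                                     # float64 out
print(y.dtype)                                 # torch.float64
row_sums = y.sum(dim=1)                        # softmax is over dim=1
assert torch.allclose(row_sums, torch.ones_like(row_sums))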
RMSLELoss
import torch
import torch.nn as nn


class RMSLELoss(nn.Module):

    def __init__(self, eps=1e-08):
        super(RMSLELoss, self).__init__()
        self.mse = nn.MSELoss()
        self.eps = eps

    def forward(self, y_hat, y):
        return torch.sqrt(self.mse(torch.log(y_hat + 1), torch.log(y + 1)) +
            self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_log_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp4 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp5 = tmp4 + tmp1
    tmp6 = tl_math.log(tmp5)
    tmp7 = tmp3 - tmp6
    tmp8 = tmp7 * tmp7
    tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
    tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
    tmp12 = 256.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.sqrt(tmp15)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_log_mse_loss_sqrt_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class RMSLELossNew(nn.Module):

    def __init__(self, eps=1e-08):
        super(RMSLELossNew, self).__init__()
        self.mse = nn.MSELoss()
        self.eps = eps

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
cerisara/weibull-knowledge-informed-ml
RMSLELoss
false
1,647
[ "MIT" ]
0
19017817f5324fb1fffd8322d2d3567a6271948c
https://github.com/cerisara/weibull-knowledge-informed-ml/tree/19017817f5324fb1fffd8322d2d3567a6271948c
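Eager reference for the fused kernel above (RMSLE = sqrt(MSE(log(y_hat + 1), log(y + 1)) + eps)), assuming CUDA and the fixed 256-element shape the kernel hard-codes:

import torch

y_hat = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
# log1p(t) == log(t + 1)
ref = torch.sqrt(torch.mean((torch.log1p(y_hat) - torch.log1p(y)) ** 2) + 1e-08)
assert torch.allclose(ref, RMSLELossNew()(y_hat, y), atol=1e-6)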
BERTEmbedding4
import torch
import torch.nn as nn
from itertools import chain as chain
import torch.utils.data
import torch.hub
import torch.nn.parallel
import torch.optim


class LearnedPositionalEmbedding3(nn.Module):

    def __init__(self, d_model, max_len=512):
        super().__init__()
        pe = torch.zeros(max_len, d_model).float()
        self.a_2 = nn.Parameter(torch.ones_like(pe))
        self.b_2 = nn.Parameter(torch.zeros_like(pe))
        pe.requires_grad = True
        pe = pe.unsqueeze(0)
        self.pe = nn.Parameter(pe)
        torch.nn.init.normal_(self.pe, std=d_model ** -0.5)

    def forward(self, x):
        return self.a_2 * self.pe[:, :x.size(1)] + self.b_2


class BERTEmbedding4(nn.Module):
    """
    BERT Embedding which consists of the following feature
        1. PositionalEmbedding : adds positional information (learned here
           rather than fixed sin/cos)

        the sum of all these features is the output of BERTEmbedding
    """

    def __init__(self, input_dim, max_len, dropout=0.1):
        """
        :param input_dim: embedding size of the positional embedding
        :param max_len: maximum sequence length
        :param dropout: dropout rate
        """
        super().__init__()
        self.learnedPosition = LearnedPositionalEmbedding3(d_model=
            input_dim, max_len=max_len)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, sequence):
        x = self.learnedPosition(sequence) + sequence
        return self.dropout(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'max_len': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import chain as chain
import torch.utils.data
import torch.hub
import torch.nn.parallel
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x2, xmask)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
            primals_4, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_3
        del primals_4
    return buf0, primals_1, primals_2


class LearnedPositionalEmbedding3(nn.Module):

    def __init__(self, d_model, max_len=512):
        super().__init__()
        pe = torch.zeros(max_len, d_model).float()
        self.a_2 = nn.Parameter(torch.ones_like(pe))
        self.b_2 = nn.Parameter(torch.zeros_like(pe))
        pe.requires_grad = True
        pe = pe.unsqueeze(0)
        self.pe = nn.Parameter(pe)
        torch.nn.init.normal_(self.pe, std=d_model ** -0.5)

    def forward(self, x):
        return self.a_2 * self.pe[:, :x.size(1)] + self.b_2


class BERTEmbedding4New(nn.Module):
    """
    BERT Embedding which consists of the following feature
        1. PositionalEmbedding : adds positional information (learned here
           rather than fixed sin/cos)

        the sum of all these features is the output of BERTEmbedding
    """

    def __init__(self, input_dim, max_len, dropout=0.1):
        """
        :param input_dim: embedding size of the positional embedding
        :param max_len: maximum sequence length
        :param dropout: dropout rate
        """
        super().__init__()
        self.learnedPosition = LearnedPositionalEmbedding3(d_model=
            input_dim, max_len=max_len)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_0):
        primals_1 = self.learnedPosition.a_2
        primals_4 = self.learnedPosition.b_2
        primals_2 = self.learnedPosition.pe
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
byeongjokim/LateTemporalModeling3DCNN_for_sign
BERTEmbedding4
false
1,648
[ "MIT" ]
0
e3a802fcf91dc3930aea782464ee34d9b747d3ab
https://github.com/byeongjokim/LateTemporalModeling3DCNN_for_sign/tree/e3a802fcf91dc3930aea782464ee34d9b747d3ab
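The fused kernel above computes a_2 * pe + b_2 + x in one pass; note the compiled forward carries no dropout op (it was traced away as an identity), so it appears to correspond to the original module in eval mode. A shape sketch, assuming CUDA:

import torch

emb = BERTEmbedding4New(input_dim=4, max_len=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = emb(x)
print(out.shape)   # torch.Size([4, 4, 4, 4]), same as the input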
SpaceToBatch
import torch
import torch.nn as nn
import torch.utils.data


class SpaceToDim(nn.Module):

    def __init__(self, scale_factor, dims=(-2, -1), dim=0):
        super(SpaceToDim, self).__init__()
        self.scale_factor = scale_factor
        self.dims = dims
        self.dim = dim

    def forward(self, x):
        _shape = list(x.shape)
        shape = _shape.copy()
        dims = [x.dim() + self.dims[0] if self.dims[0] < 0 else self.dims[0],
            x.dim() + self.dims[1] if self.dims[1] < 0 else self.dims[1]]
        dims = [max(abs(dims[0]), abs(dims[1])), min(abs(dims[0]), abs(dims
            [1]))]
        if self.dim in dims:
            raise RuntimeError("Integrate dimension can't be space dimension!")
        shape[dims[0]] //= self.scale_factor
        shape[dims[1]] //= self.scale_factor
        shape.insert(dims[0] + 1, self.scale_factor)
        shape.insert(dims[1] + 1, self.scale_factor)
        dim = self.dim if self.dim < dims[1] else self.dim + 1
        dim = dim if dim <= dims[0] else dim + 1
        x = x.reshape(*shape)
        perm = [dim, dims[1] + 1, dims[0] + 2]
        perm = [i for i in range(min(perm))] + perm
        perm.extend(i for i in range(x.dim()) if i not in perm)
        x = x.permute(*perm)
        shape = _shape
        shape[self.dim] *= self.scale_factor ** 2
        shape[self.dims[0]] //= self.scale_factor
        shape[self.dims[1]] //= self.scale_factor
        return x.reshape(*shape)

    def extra_repr(self):
        return f'scale_factor={self.scale_factor}'


class SpaceToBatch(nn.Module):

    def __init__(self, block_size):
        super(SpaceToBatch, self).__init__()
        self.body = SpaceToDim(block_size, dim=0)

    def forward(self, x):
        return self.body(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'block_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
        constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 16
    y1 = yindex // 16
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (64, 4, 1, 1), (4, 1, 1, 1), 0),


class SpaceToDim(nn.Module):

    def __init__(self, scale_factor, dims=(-2, -1), dim=0):
        super(SpaceToDim, self).__init__()
        self.scale_factor = scale_factor
        self.dims = dims
        self.dim = dim

    def forward(self, x):
        _shape = list(x.shape)
        shape = _shape.copy()
        dims = [x.dim() + self.dims[0] if self.dims[0] < 0 else self.dims[0],
            x.dim() + self.dims[1] if self.dims[1] < 0 else self.dims[1]]
        dims = [max(abs(dims[0]), abs(dims[1])), min(abs(dims[0]), abs(dims
            [1]))]
        if self.dim in dims:
            raise RuntimeError("Integrate dimension can't be space dimension!")
        shape[dims[0]] //= self.scale_factor
        shape[dims[1]] //= self.scale_factor
        shape.insert(dims[0] + 1, self.scale_factor)
        shape.insert(dims[1] + 1, self.scale_factor)
        dim = self.dim if self.dim < dims[1] else self.dim + 1
        dim = dim if dim <= dims[0] else dim + 1
        x = x.reshape(*shape)
        perm = [dim, dims[1] + 1, dims[0] + 2]
        perm = [i for i in range(min(perm))] + perm
        perm.extend(i for i in range(x.dim()) if i not in perm)
        x = x.permute(*perm)
        shape = _shape
        shape[self.dim] *= self.scale_factor ** 2
        shape[self.dims[0]] //= self.scale_factor
        shape[self.dims[1]] //= self.scale_factor
        return x.reshape(*shape)

    def extra_repr(self):
        return f'scale_factor={self.scale_factor}'


class SpaceToBatchNew(nn.Module):

    def __init__(self, block_size):
        super(SpaceToBatchNew, self).__init__()
        self.body = SpaceToDim(block_size, dim=0)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
cestcedric/TSSR-GAN
SpaceToBatch
false
1,649
[ "BSD-2-Clause", "MIT" ]
0
d6e1b50409e0f0591660552993e6d5b70d41e766
https://github.com/cestcedric/TSSR-GAN/tree/d6e1b50409e0f0591660552993e6d5b70d41e766
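What the rearrangement above does with the test shape: block_size=4 collapses each 4x4 spatial tile into the batch dimension, so batch grows by block_size ** 2 (CUDA assumed for the compiled path):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = SpaceToBatchNew(block_size=4)(x)
print(y.shape)   # torch.Size([64, 4, 1, 1])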
RpowInt
import torch


class RpowInt(torch.nn.Module):

    def __init__(self):
        super(RpowInt, self).__init__()

    def forward(self, x):
        return 2 ** x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.exp2(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RpowIntNew(torch.nn.Module):

    def __init__(self):
        super(RpowIntNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
bunderhi/torch2trt
RpowInt
false
1,650
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
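2 ** x lowers to a single libdevice.exp2 call in the kernel above; the eager equivalent for comparison (CUDA assumed):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(2 ** x, RpowIntNew()(x))   # exp2(x) == 2 ** x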
Perplexity
import torch
from torch import nn as nn
from torch.nn.modules.loss import CrossEntropyLoss


class Perplexity(CrossEntropyLoss):
    __constants__ = ['weight', 'ignore_index', 'reduction']

    def __init__(self, weight=None, size_average=None, ignore_index=-100,
            reduce=None):
        super(Perplexity, self).__init__(weight, size_average,
            ignore_index, reduce, 'mean')

    def forward(self, input, target):
        loss = super(Perplexity, self).forward(input, target)
        return torch.exp(loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn as nn
from torch.nn.modules.loss import CrossEntropyLoss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + r3, None)
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
    tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
    tmp19 = -tmp18
    tmp20 = 0.015625
    tmp21 = tmp19 * tmp20
    tmp22 = tl_math.exp(tmp21)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused__log_softmax_div_exp_mul_neg_sum_1[grid(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del buf0
    return buf2,


class PerplexityNew(CrossEntropyLoss):
    __constants__ = ['weight', 'ignore_index', 'reduction']

    def __init__(self, weight=None, size_average=None, ignore_index=-100, reduce=None):
        super(PerplexityNew, self).__init__(weight, size_average, ignore_index, reduce, 'mean')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
cesarali/Tyche
Perplexity
false
1,651
[ "MIT" ]
0
d892df9e0b982f538ae38221ff5848f6d726a4fb
https://github.com/cesarali/Tyche/tree/d892df9e0b982f538ae38221ff5848f6d726a4fb
NoiseInjection
import torch
from torch import nn


class NoiseInjection(nn.Module):

    def __init__(self, channel):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))

    def forward(self, image, noise):
        return image + self.weight * noise


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channel': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x3, xmask)
    tmp3 = tmp1 * tmp2
    tmp4 = tmp0 + tmp3
    tl.store(out_ptr0 + x3, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_3, primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2


class NoiseInjectionNew(nn.Module):

    def __init__(self, channel):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))

    def forward(self, input_0, input_1):
        primals_1 = self.weight
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
celdeldel/style_conditionnal_gan
NoiseInjection
false
1,652
[ "MIT" ]
0
3a4623560af1e12d46e2f9ffa9726c29df9d5680
https://github.com/celdeldel/style_conditionnal_gan/tree/3a4623560af1e12d46e2f9ffa9726c29df9d5680
Stub
import torch
import torch.nn as nn
import torch.utils.data


class Stub(nn.Module):

    def __init__(self, shape):
        super(Stub, self).__init__()
        self.shape = shape
        return

    def forward(self, x):
        return x.new_ones(self.shape)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'shape': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_new_ones_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_new_ones_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1)
    return buf0,


class StubNew(nn.Module):

    def __init__(self, shape):
        super(StubNew, self).__init__()
        self.shape = shape
        return

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
cestcedric/TSSR-GAN
Stub
false
1,653
[ "BSD-2-Clause", "MIT" ]
0
d6e1b50409e0f0591660552993e6d5b70d41e766
https://github.com/cestcedric/TSSR-GAN/tree/d6e1b50409e0f0591660552993e6d5b70d41e766
FusedDownsample
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt


class FusedDownsample(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, padding=0):
        super().__init__()
        weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        bias = torch.zeros(out_channel)
        fan_in = in_channel * kernel_size * kernel_size
        self.multiplier = sqrt(2 / fan_in)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.pad = padding

    def forward(self, input):
        weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
        weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] +
                  weight[:, :, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
        out = F.conv2d(input, weight, self.bias, stride=2, padding=self.pad)
        return out


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 5 % 5
    x0 = xindex % 5
    x2 = xindex // 25
    x4 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0)
    tmp12 = 0.1767766952966369
    tmp13 = tmp11 * tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp10, tmp13, tmp14)
    tmp16 = -1 + x1
    tmp17 = tmp16 >= tmp1
    tmp18 = tmp16 < tmp3
    tmp19 = tmp17 & tmp18
    tmp20 = tmp19 & tmp6
    tmp21 = tmp20 & tmp7
    tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask, other=0.0)
    tmp23 = tmp22 * tmp12
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp21, tmp23, tmp24)
    tmp26 = tmp15 + tmp25
    tmp27 = -1 + x0
    tmp28 = tmp27 >= tmp1
    tmp29 = tmp27 < tmp3
    tmp30 = tmp8 & tmp28
    tmp31 = tmp30 & tmp29
    tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask, other=0.0)
    tmp33 = tmp32 * tmp12
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp31, tmp33, tmp34)
    tmp36 = tmp26 + tmp35
    tmp37 = tmp19 & tmp28
    tmp38 = tmp37 & tmp29
    tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask, other=0.0)
    tmp40 = tmp39 * tmp12
    tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp36 + tmp42
    tmp44 = 0.25
    tmp45 = tmp43 * tmp44
    tl.store(in_out_ptr0 + x4, tmp45, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 14400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 900 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1),
            transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 30, 30), (3600, 900, 30, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(14400)](buf3, primals_2, 14400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf3, primals_3, buf1


class FusedDownsampleNew(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, padding=0):
        super().__init__()
        weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        bias = torch.zeros(out_channel)
        fan_in = in_channel * kernel_size * kernel_size
        self.multiplier = sqrt(2 / fan_in)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.pad = padding

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
celdeldel/style_conditionnal_gan
FusedDownsample
false
1,654
[ "MIT" ]
0
3a4623560af1e12d46e2f9ffa9726c29df9d5680
https://github.com/celdeldel/style_conditionnal_gan/tree/3a4623560af1e12d46e2f9ffa9726c29df9d5680
CustomReLU
import torch
import torch.nn as nn


class CustomReLU(nn.Module):

    def __init__(self, max_z=6.0):
        super(CustomReLU, self).__init__()
        self.max_z = max_z

    def forward(self, x):
        return torch.clamp(x, min=0, max=self.max_z)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 6.0
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class CustomReLUNew(nn.Module):

    def __init__(self, max_z=6.0):
        super(CustomReLUNew, self).__init__()
        self.max_z = max_z

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
charlesmackin/tiny
CustomReLU
false
1,655
[ "Apache-2.0" ]
0
bf8afc5cfc15e12efdd3bca0d559adfdfc435981
https://github.com/charlesmackin/tiny/tree/bf8afc5cfc15e12efdd3bca0d559adfdfc435981
FusedUpsample
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt


class FusedUpsample(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, padding=0):
        super().__init__()
        weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
        bias = torch.zeros(out_channel)
        fan_in = in_channel * kernel_size * kernel_size
        self.multiplier = sqrt(2 / fan_in)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.pad = padding

    def forward(self, input):
        weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
        weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] +
                  weight[:, :, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
        out = F.conv_transpose2d(input, weight, self.bias, stride=2, padding=self.pad)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 5 % 5
    x0 = xindex % 5
    x2 = xindex // 25
    x4 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0)
    tmp12 = 0.1767766952966369
    tmp13 = tmp11 * tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp10, tmp13, tmp14)
    tmp16 = -1 + x1
    tmp17 = tmp16 >= tmp1
    tmp18 = tmp16 < tmp3
    tmp19 = tmp17 & tmp18
    tmp20 = tmp19 & tmp6
    tmp21 = tmp20 & tmp7
    tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask, other=0.0)
    tmp23 = tmp22 * tmp12
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp21, tmp23, tmp24)
    tmp26 = tmp15 + tmp25
    tmp27 = -1 + x0
    tmp28 = tmp27 >= tmp1
    tmp29 = tmp27 < tmp3
    tmp30 = tmp8 & tmp28
    tmp31 = tmp30 & tmp29
    tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask, other=0.0)
    tmp33 = tmp32 * tmp12
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp31, tmp33, tmp34)
    tmp36 = tmp26 + tmp35
    tmp37 = tmp19 & tmp28
    tmp38 = tmp37 & tmp29
    tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask, other=0.0)
    tmp40 = tmp39 * tmp12
    tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp36 + tmp42
    tmp44 = 0.25
    tmp45 = tmp43 * tmp44
    tl.store(in_out_ptr0 + x4, tmp45, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1936
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 121 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1),
            transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 11, 11), (484, 121, 11, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(1936)](buf3, primals_2, 1936, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf3, primals_3, buf1


class FusedUpsampleNew(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, padding=0):
        super().__init__()
        weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
        bias = torch.zeros(out_channel)
        fan_in = in_channel * kernel_size * kernel_size
        self.multiplier = sqrt(2 / fan_in)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.pad = padding

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
celdeldel/style_conditionnal_gan
FusedUpsample
false
1,656
[ "MIT" ]
0
3a4623560af1e12d46e2f9ffa9726c29df9d5680
https://github.com/celdeldel/style_conditionnal_gan/tree/3a4623560af1e12d46e2f9ffa9726c29df9d5680
_UpsampleLinear
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class _UpsampleLinear(nn.Module):

    def __init__(self, scale):
        super(_UpsampleLinear, self).__init__()
        self._mode = 'linear', 'bilinear', 'trilinear'
        self.scale = scale

    def forward(self, x, scale=None):
        scale = scale or self.scale
        mode = self._mode[x.dim() - 3]
        return F.interpolate(x, scale_factor=scale, mode=mode, align_corners=False)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale': 1.0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tmp10 = tl.full([1], 1, tl.int64)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tmp14 = x0
    tmp15 = tmp14.to(tl.float32)
    tmp16 = tmp15 + tmp2
    tmp17 = tmp16 * tmp4
    tmp18 = tmp17 - tmp2
    tmp19 = triton_helpers.maximum(tmp18, tmp7)
    tmp20 = tmp19.to(tl.int32)
    tmp21 = tmp20 + tmp10
    tmp22 = triton_helpers.minimum(tmp21, tmp12)
    tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp25 = tmp23 - tmp24
    tmp26 = tmp20.to(tl.float32)
    tmp27 = tmp19 - tmp26
    tmp28 = triton_helpers.maximum(tmp27, tmp7)
    tmp29 = triton_helpers.minimum(tmp28, tmp4)
    tmp30 = tmp25 * tmp29
    tmp31 = tmp24 + tmp30
    tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp34 = tmp33 - tmp32
    tmp35 = tmp34 * tmp29
    tmp36 = tmp32 + tmp35
    tmp37 = tmp31 - tmp36
    tmp38 = tmp9.to(tl.float32)
    tmp39 = tmp8 - tmp38
    tmp40 = triton_helpers.maximum(tmp39, tmp7)
    tmp41 = triton_helpers.minimum(tmp40, tmp4)
    tmp42 = tmp37 * tmp41
    tmp43 = tmp36 + tmp42
    tl.store(in_out_ptr0 + x4, tmp43, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid(256)](buf2, arg0_1,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf2,


class _UpsampleLinearNew(nn.Module):

    def __init__(self, scale):
        super(_UpsampleLinearNew, self).__init__()
        self._mode = 'linear', 'bilinear', 'trilinear'
        self.scale = scale

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
cestcedric/TSSR-GAN
_UpsampleLinear
false
1,657
[ "BSD-2-Clause", "MIT" ]
0
d6e1b50409e0f0591660552993e6d5b70d41e766
https://github.com/cestcedric/TSSR-GAN/tree/d6e1b50409e0f0591660552993e6d5b70d41e766
FlawDetectorCriterion
import torch
import torch.nn as nn
import torch.nn.functional as F


class FlawDetectorCriterion(nn.Module):
    """ Criterion of the flaw detector. """

    def __init__(self):
        super(FlawDetectorCriterion, self).__init__()

    def forward(self, pred, gt, is_ssl=False, reduction=True):
        loss = F.mse_loss(pred, gt, reduction='none')
        if reduction:
            loss = torch.mean(loss, dim=(1, 2, 3))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = 64.0
    tmp9 = tmp7 / tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_mse_loss_0[grid(4)](buf1, arg1_1, arg0_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class FlawDetectorCriterionNew(nn.Module):
    """ Criterion of the flaw detector. """

    def __init__(self):
        super(FlawDetectorCriterionNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
charlesCXK/PixelSSL
FlawDetectorCriterion
false
1,658
[ "Apache-2.0" ]
0
2e85e12c1db5b24206bfbbf2d7f6348ae82b2105
https://github.com/charlesCXK/PixelSSL/tree/2e85e12c1db5b24206bfbbf2d7f6348ae82b2105
ActivationNoise
import torch
import torch.nn as nn


class ActivationNoise(nn.Module):
    """Gaussian noise regularizer.

    Args:
        sigma (float, optional): relative standard deviation used to generate the
            noise. Relative means that it will be multiplied by the magnitude of
            the value you are adding the noise to. This means that sigma can be
            the same regardless of the scale of the vector.
        is_relative_detach (bool, optional): whether to detach the variable before
            computing the scale of the noise. If `False` then the scale of the
            noise won't be seen as a constant but something to optimize: this will
            bias the network to generate vectors with smaller values.
    """

    def __init__(self, sigma=0.04, device='cuda', is_relative_detach=False):
        super().__init__()
        self.sigma = sigma
        self.is_relative_detach = is_relative_detach
        self.device = device

    def forward(self, x):
        if self.sigma > 0:
            with torch.no_grad():
                scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
                sampled_noise = torch.ones_like(x).normal_() * scale
                x = x + sampled_noise
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_ones_like_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_out_ptr0 + x0, xmask)
    tmp2 = 0.04
    tmp3 = tmp0 * tmp2
    tmp4 = tmp1 * tmp3
    tmp5 = tmp0 + tmp4
    tl.store(in_out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_ones_like_0[grid(256)](buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = torch.ops.aten.normal_functional.default(buf0)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = buf2
        del buf2
        triton_poi_fused_add_mul_1[grid(256)](buf3, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf3,


class ActivationNoiseNew(nn.Module):
    """Gaussian noise regularizer.

    Args:
        sigma (float, optional): relative standard deviation used to generate the
            noise. Relative means that it will be multiplied by the magnitude of
            the value you are adding the noise to. This means that sigma can be
            the same regardless of the scale of the vector.
        is_relative_detach (bool, optional): whether to detach the variable before
            computing the scale of the noise. If `False` then the scale of the
            noise won't be seen as a constant but something to optimize: this will
            bias the network to generate vectors with smaller values.
    """

    def __init__(self, sigma=0.04, device='cuda', is_relative_detach=False):
        super().__init__()
        self.sigma = sigma
        self.is_relative_detach = is_relative_detach
        self.device = device

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
charlesmackin/tiny
ActivationNoise
false
1,659
[ "Apache-2.0" ]
0
bf8afc5cfc15e12efdd3bca0d559adfdfc435981
https://github.com/charlesmackin/tiny/tree/bf8afc5cfc15e12efdd3bca0d559adfdfc435981
MeanVoxelFeatureExtractor
import torch
import torch.nn as nn


class VoxelFeatureExtractor(nn.Module):

    def __init__(self, **kwargs):
        super().__init__()

    def get_output_feature_dim(self):
        raise NotImplementedError

    def forward(self, **kwargs):
        raise NotImplementedError


class MeanVoxelFeatureExtractor(VoxelFeatureExtractor):

    def __init__(self, **kwargs):
        super().__init__()

    def get_output_feature_dim(self):
        return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use']

    def forward(self, features, num_voxels, **kwargs):
        """
        :param features: (N, max_points_of_each_voxel, 3 + C)
        :param num_voxels: (N)
        :param kwargs:
        :return:
        """
        points_mean = features[:, :, :].sum(dim=1, keepdim=False) / num_voxels.type_as(features).view(-1, 1)
        return points_mean.contiguous()


def get_inputs():
    return [torch.rand([64, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (64, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class VoxelFeatureExtractor(nn.Module):

    def __init__(self, **kwargs):
        super().__init__()

    def get_output_feature_dim(self):
        raise NotImplementedError

    def forward(self, **kwargs):
        raise NotImplementedError


class MeanVoxelFeatureExtractorNew(VoxelFeatureExtractor):

    def __init__(self, **kwargs):
        super().__init__()

    def get_output_feature_dim(self):
        return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use']

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
charlesyz/PCDet
MeanVoxelFeatureExtractor
false
1,660
[ "Apache-2.0" ]
0
1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80
https://github.com/charlesyz/PCDet/tree/1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80
make_dilation_dense
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class make_dilation_dense(nn.Module):

    def __init__(self, nChannels, growthRate, kernel_size=3):
        super(make_dilation_dense, self).__init__()
        self.conv = nn.Conv2d(nChannels, growthRate, kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2 + 1, bias=True, dilation=2)

    def forward(self, x):
        out = F.relu(self.conv(x))
        out = torch.cat((x, out), 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nChannels': 4, 'growthRate': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0)
    tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 0, tl.int32)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp6, tmp13, tmp14)
    tmp16 = tl.where(tmp4, tmp5, tmp15)
    tl.store(out_ptr0 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2),
            dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_3, buf0, primals_2, buf1, 512, XBLOCK=128, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf0, primals_2, buf2,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    return buf1, primals_1, primals_3, buf2


class make_dilation_denseNew(nn.Module):

    def __init__(self, nChannels, growthRate, kernel_size=3):
        super(make_dilation_denseNew, self).__init__()
        self.conv = nn.Conv2d(nChannels, growthRate, kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2 + 1, bias=True, dilation=2)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
cestcedric/TSSR-GAN
make_dilation_dense
false
1,661
[ "BSD-2-Clause", "MIT" ]
0
d6e1b50409e0f0591660552993e6d5b70d41e766
https://github.com/cestcedric/TSSR-GAN/tree/d6e1b50409e0f0591660552993e6d5b70d41e766
SelfAttentionRE
import torch
import torch.nn.functional as F
from torch import nn


class SelfAttentionRE(nn.Module):

    def __init__(self, emb_dim):
        super().__init__()
        self.query_mlp = nn.Linear(in_features=emb_dim, out_features=1)
        self.value_mlp = nn.Linear(in_features=emb_dim, out_features=emb_dim)

    def forward(self, x):
        B, N, N, emb_dim = x.shape
        value = self.value_mlp(x.view(-1, emb_dim)).view(B, N, N, emb_dim)
        query = self.query_mlp(x.view(-1, emb_dim)).view(B, N, N, 1)
        att_weights = F.softmax(query, dim=0)
        out = torch.sum(value * att_weights, dim=0)
        return out, att_weights.detach()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'emb_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + x2), xmask)
    tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (128 + x2), xmask)
    tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (192 + x2), xmask)
    tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x2, tmp14, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_4
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf2
        triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
        del buf3
        triton_poi_fused_mul_sum_2[grid(64)](buf0, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf5, buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf4


class SelfAttentionRENew(nn.Module):

    def __init__(self, emb_dim):
        super().__init__()
        self.query_mlp = nn.Linear(in_features=emb_dim, out_features=1)
        self.value_mlp = nn.Linear(in_features=emb_dim, out_features=emb_dim)

    def forward(self, input_0):
        primals_4 = self.query_mlp.weight
        primals_5 = self.query_mlp.bias
        primals_2 = self.value_mlp.weight
        primals_3 = self.value_mlp.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0], output[1]
chaitanya2334/lsm
SelfAttentionRE
false
1,662
[ "MIT" ]
0
504c732238b419cd77e7e0a97af040778ee9c7dd
https://github.com/chaitanya2334/lsm/tree/504c732238b419cd77e7e0a97af040778ee9c7dd
FFN
import torch
import torch.nn as nn
from collections import OrderedDict


class FFN(nn.Module):

    def __init__(self, layer_arch, input_size, output_size, bias=True):
        super(FFN, self).__init__()
        self.layer_arch = layer_arch
        self.input_size = input_size
        self.output_size = output_size
        self.bias = bias
        self.build_model()

    def build_model(self):
        model_arch = []
        unit = self.input_size
        for i, num in enumerate(self.layer_arch):
            model_arch.append(('dense_' + str(i), nn.Linear(unit, num, bias=self.bias)))
            model_arch.append(('nonlinear_' + str(i), nn.ReLU()))
            if i == 1:
                model_arch.append(('dropout_' + str(i), nn.Dropout()))
            unit = num
        model_arch.append(('dense_final', nn.Linear(unit, self.output_size, bias=self.bias)))
        model_arch.append(('act_final', nn.Sigmoid()))
        self.model = nn.Sequential(OrderedDict(model_arch))

    def forward(self, inputs):
        return self.model(inputs)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'layer_arch': [4, 4], 'input_size': 4, 'output_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf6, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        triton_poi_fused_sigmoid_1[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
        buf5, primals_6, buf6, primals_4, buf7)


class FFNNew(nn.Module):

    def __init__(self, layer_arch, input_size, output_size, bias=True):
        super(FFNNew, self).__init__()
        self.layer_arch = layer_arch
        self.input_size = input_size
        self.output_size = output_size
        self.bias = bias
        self.build_model()

    def build_model(self):
        model_arch = []
        unit = self.input_size
        for i, num in enumerate(self.layer_arch):
            model_arch.append(('dense_' + str(i), nn.Linear(unit, num, bias=self.bias)))
            model_arch.append(('nonlinear_' + str(i), nn.ReLU()))
            if i == 1:
                model_arch.append(('dropout_' + str(i), nn.Dropout()))
            unit = num
        model_arch.append(('dense_final', nn.Linear(unit, self.output_size, bias=self.bias)))
        model_arch.append(('act_final', nn.Sigmoid()))
        self.model = nn.Sequential(OrderedDict(model_arch))

    def forward(self, input_0):
        primals_1 = self.model.dense_0.weight
        primals_2 = self.model.dense_0.bias
        primals_4 = self.model.dense_1.weight
        primals_5 = self.model.dense_1.bias
        primals_6 = self.model.dense_final.weight
        primals_7 = self.model.dense_final.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
charlesxin97/ToolFinder_binder
FFN
false
1,663
[ "Apache-2.0" ]
0
49467d5519adcd6d881e57d460c97c37b6a45add
https://github.com/charlesxin97/ToolFinder_binder/tree/49467d5519adcd6d881e57d460c97c37b6a45add
FCDiscriminatorCriterion
import torch
import torch.nn as nn
import torch.nn.functional as F


class FCDiscriminatorCriterion(nn.Module):

    def __init__(self):
        super(FCDiscriminatorCriterion, self).__init__()

    def forward(self, pred, gt):
        loss = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
        return torch.mean(loss, dim=(1, 2, 3))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp3 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 64.0
    tmp18 = tmp16 / tmp17
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp18, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(4)](buf1, arg0_1, arg1_1,
            4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class FCDiscriminatorCriterionNew(nn.Module):

    def __init__(self):
        super(FCDiscriminatorCriterionNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
charlesCXK/PixelSSL
FCDiscriminatorCriterion
false
1,664
[ "Apache-2.0" ]
0
2e85e12c1db5b24206bfbbf2d7f6348ae82b2105
https://github.com/charlesCXK/PixelSSL/tree/2e85e12c1db5b24206bfbbf2d7f6348ae82b2105
PolicyNetwork
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal


class PolicyNetwork(nn.Module):

    def __init__(self, num_inputs, num_actions, hidden_size, action_range=1.0,
                 init_w=0.003, log_std_min=-20, log_std_max=2):
        super(PolicyNetwork, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)
        self.linear4 = nn.Linear(hidden_size, hidden_size)
        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)
        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)
        self.action_range = action_range
        self.num_actions = num_actions

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        x = F.relu(self.linear4(x))
        mean = F.tanh(self.mean_linear(x))
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mean, log_std

    def evaluate(self, state, deterministic, eval_noise_scale, epsilon=1e-06):
        """ generate action with state as input wrt the policy network, for calculating gradients """
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(0, 1)
        z = normal.sample()
        action_0 = torch.tanh(mean + std * z)
        action = self.action_range * mean if deterministic else self.action_range * action_0
        log_prob = Normal(mean, std).log_prob(mean + std * z) - torch.log(
            1.0 - action_0.pow(2) + epsilon) - np.log(self.action_range)
        log_prob = log_prob.sum(dim=1, keepdim=True)
        """ add noise """
        eval_noise_clip = 2 * eval_noise_scale
        noise = normal.sample(action.shape) * eval_noise_scale
        noise = torch.clamp(noise, -eval_noise_clip, eval_noise_clip)
        action = action + noise
        return action, log_prob, z, mean, log_std

    def get_action(self, state, deterministic, explore_noise_scale):
        """ generate action for interaction with env """
        state = torch.FloatTensor(state).unsqueeze(0)
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(0, 1)
        z = normal.sample()
        action = mean.detach().cpu().numpy()[0] if deterministic else torch.tanh(
            mean + std * z).detach().cpu().numpy()[0]
        """ add noise """
        noise = normal.sample(action.shape) * explore_noise_scale
        action = self.action_range * action + noise.numpy()
        return action

    def sample_action(self):
        a = torch.FloatTensor(self.num_actions).uniform_(-1, 1)
        return self.action_range * a.numpy()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.distributions import Normal

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = -20.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 2.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 >= tmp3
    tmp8 = tmp2 <= tmp5
    tmp9 = tmp7 & tmp8
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7,
     primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf16, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf15, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5, primals_7, buf14, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
        buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf6
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7, primals_9, buf13, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
        buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf8
        triton_poi_fused_tanh_1[grid(256)](buf9, primals_11, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
        buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clamp_ge_le_logical_and_2[grid(256)](buf10, primals_13, buf11, buf12,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf10
        del primals_13
    return (buf9, buf11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
        buf9, buf12, primals_12, primals_10, buf13, primals_8, buf14, primals_6, buf15,
        primals_4, buf16)


class PolicyNetworkNew(nn.Module):

    def __init__(self, num_inputs, num_actions, hidden_size, action_range=1.0,
                 init_w=0.003, log_std_min=-20, log_std_max=2):
        super(PolicyNetworkNew, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)
        self.linear4 = nn.Linear(hidden_size, hidden_size)
        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)
        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)
        self.action_range = action_range
        self.num_actions = num_actions

    def evaluate(self, state, deterministic, eval_noise_scale, epsilon=1e-06):
        """ generate action with state as input wrt the policy network, for calculating gradients """
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(0, 1)
        z = normal.sample()
        action_0 = torch.tanh(mean + std * z)
        action = self.action_range * mean if deterministic else self.action_range * action_0
        log_prob = Normal(mean, std).log_prob(mean + std * z) - torch.log(
            1.0 - action_0.pow(2) + epsilon) - np.log(self.action_range)
        log_prob = log_prob.sum(dim=1, keepdim=True)
        """ add noise """
        eval_noise_clip = 2 * eval_noise_scale
        noise = normal.sample(action.shape) * eval_noise_scale
        noise = torch.clamp(noise, -eval_noise_clip, eval_noise_clip)
        action = action + noise
        return action, log_prob, z, mean, log_std

    def get_action(self, state, deterministic, explore_noise_scale):
        """ generate action for interaction with env """
        state = torch.FloatTensor(state).unsqueeze(0)
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(0, 1)
        z = normal.sample()
        action = mean.detach().cpu().numpy()[0] if deterministic else torch.tanh(
            mean + std * z).detach().cpu().numpy()[0]
        """ add noise """
        noise = normal.sample(action.shape) * explore_noise_scale
        action = self.action_range * action + noise.numpy()
        return action

    def sample_action(self):
        a = torch.FloatTensor(self.num_actions).uniform_(-1, 1)
        return self.action_range * a.numpy()

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear3.weight
        primals_7 = self.linear3.bias
        primals_8 = self.linear4.weight
        primals_9 = self.linear4.bias
        primals_10 = self.mean_linear.weight
        primals_11 = self.mean_linear.bias
        primals_12 = self.log_std_linear.weight
        primals_13 = self.log_std_linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
            primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
        return output[0], output[1]
chagri/SOTA-RL-Algorithms
PolicyNetwork
false
1,665
[ "Apache-2.0" ]
0
58b416e7c706d8426dc402482e72ca7283568e71
https://github.com/chagri/SOTA-RL-Algorithms/tree/58b416e7c706d8426dc402482e72ca7283568e71
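Editor's note: a minimal smoke test for the record above (a sketch only, assuming a CUDA-capable GPU and a working Triton install). It checks the one property the fused clamp kernel guarantees: log_std stays inside [log_std_min, log_std_max] = [-20, 2]. The sizes mirror the shape asserts in call().

import torch

net = PolicyNetworkNew(num_inputs=4, num_actions=4, hidden_size=4).cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')  # shape asserted by call()
mean, log_std = net(state)                     # compiled forward returns both
assert mean.shape == log_std.shape == (4, 4, 4, 4)
assert log_std.min().item() >= -20.0 and log_std.max().item() <= 2.0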
TemporalAggregation_Mean
from _paritybench_helpers import _mock_config import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product class TemporalAggregation_Mean(nn.Module): def __init__(self, cfg): super(TemporalAggregation_Mean, self).__init__() self.K = cfg.K def forward(self, s): s = s.view(s.size(0) // self.K, self.K, s.size(1), s.size(2), s.size(3) ) s = torch.mean(s, dim=1) return s def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(K=4)}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from math import sqrt as sqrt from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class TemporalAggregation_MeanNew(nn.Module): def __init__(self, cfg): super(TemporalAggregation_MeanNew, self).__init__() self.K = cfg.K def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
burhanmudassar/pytorch-action-detection
TemporalAggregation_Mean
false
1,666
[ "MIT" ]
0
16afb9312248d73c0e2be56ac733e0a33040307e
https://github.com/burhanmudassar/pytorch-action-detection/tree/16afb9312248d73c0e2be56ac733e0a33040307e
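Records like this one can be spot-checked by running the eager module and its compiled counterpart side by side. A minimal parity sketch, assuming a CUDA GPU with Triton available; types.SimpleNamespace stands in for the harness's _mock_config, since only the K attribute is read.

import types
import torch

cfg = types.SimpleNamespace(K=4)           # stand-in for _mock_config(K=4)
ref = TemporalAggregation_Mean(cfg)        # eager reference
opt = TemporalAggregation_MeanNew(cfg)     # Triton-compiled variant
x = torch.rand(4, 4, 4, 4, device='cuda')  # matches get_inputs()
assert torch.allclose(ref(x), opt(x))      # both average over the K axis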
GlobalMaxPool1D
import torch import torch.nn.functional as functional class GlobalMaxPool1D(torch.nn.Module): def __init__(self): super(GlobalMaxPool1D, self).__init__() def forward(self, x): """ x shape: (batch_size, channel, seq_len) return shape: (batch_size, channel, 1) """ return functional.max_pool1d(x, kernel_size=x.shape[2]) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), class GlobalMaxPool1DNew(torch.nn.Module): def __init__(self): super(GlobalMaxPool1DNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
charliemorning/mlws
GlobalMaxPool1D
false
1,667
[ "MIT" ]
0
8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
https://github.com/charliemorning/mlws/tree/8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
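This module is stateless, so a parity check needs no weight sharing. A short sketch (CUDA assumed; the generated call() asserts a contiguous (4, 4, 4) input):

import torch

ref, opt = GlobalMaxPool1D(), GlobalMaxPool1DNew()
x = torch.rand(4, 4, 4, device='cuda')
out_ref, out_opt = ref(x), opt(x)
assert out_ref.shape == (4, 4, 1)          # (batch, channel, 1)
assert torch.allclose(out_ref, out_opt)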
GlobalAvgPool1D
import torch import torch.nn.functional as functional class GlobalAvgPool1D(torch.nn.Module): def __init__(self): super(GlobalAvgPool1D, self).__init__() def forward(self, x): """ x shape: (batch_size, channel, seq_len) return shape: (batch_size, channel, 1) """ return functional.avg_pool1d(x, kernel_size=x.shape[2]) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), class GlobalAvgPool1DNew(torch.nn.Module): def __init__(self): super(GlobalAvgPool1DNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
charliemorning/mlws
GlobalAvgPool1D
false
1,668
[ "MIT" ]
0
8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
https://github.com/charliemorning/mlws/tree/8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
Mask
import torch import torch.nn as nn class Mask(nn.Module): def forward(self, seq, mask): seq_mask = torch.unsqueeze(mask, 2) seq_mask = torch.transpose(seq_mask.repeat(1, 1, seq.size()[1]), 1, 2) return seq.where(torch.eq(seq_mask, 1), torch.zeros_like(seq)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y1 = yindex // 4 y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 == tmp1 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp5, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_eq_where_zeros_like_0[grid(16, 4)](arg0_1, arg1_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class MaskNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
charliemorning/mlws
Mask
false
1,669
[ "MIT" ]
0
8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
https://github.com/charliemorning/mlws/tree/8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
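Two caveats for the Mask record: the module keeps values only where the mask equals 1 exactly, so a meaningful demo needs a hard 0/1 mask rather than the uniform floats from get_inputs(); and in the fused kernel the first input (in_ptr0) supplies the condition compared against 1 while the second supplies the kept values, so argument order deserves care when driving the compiled variant. A reference-only sketch of the semantics (CUDA assumed):

import torch

ref = Mask()
seq = torch.rand(4, 4, device='cuda')
mask = torch.randint(0, 2, (4, 4), device='cuda').float()
out = ref(seq, mask)                       # (4, 4, 4) after broadcasting
assert out.shape == (4, 4, 4)
# out[i, :, k] must be zero wherever mask[i, k] == 0
assert torch.all(out.transpose(1, 2)[mask == 0] == 0)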
Network
import torch
import torch.nn as nn
import torch.nn.functional as F


class Network(nn.Module):

    def __init__(self, input_size, nb_action):
        super(Network, self).__init__()
        self.input_size = input_size
        self.nb_action = nb_action
        self.fc1 = nn.Linear(input_size, 32)
        self.fc2 = nn.Linear(32, 64)
        self.fc3 = nn.Linear(64, nb_action)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = torch.sigmoid(self.fc2(x))
        q_values = self.fc3(x)
        return q_values


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'nb_action': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 32), (32, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 64), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(4096)](buf3, primals_5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0 ), buf3, primals_6, primals_4, buf5 class NetworkNew(nn.Module): def __init__(self, input_size, nb_action): super(NetworkNew, self).__init__() self.input_size = input_size self.nb_action = nb_action self.fc1 = nn.Linear(input_size, 32) self.fc2 = nn.Linear(32, 64) 
self.fc3 = nn.Linear(64, nb_action) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
cheapmouse94/Machine_Learning-Gates-python
Network
false
1,670
[ "MIT" ]
0
1e159ccf8f9a5db9104fa3926b85750787676e15
https://github.com/cheapmouse94/Machine_Learning-Gates-python/tree/1e159ccf8f9a5db9104fa3926b85750787676e15
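For parameterized records such as this one, the two variants must share weights before comparison; the submodule names coincide (fc1, fc2, fc3), so load_state_dict suffices. A parity sketch under the usual assumptions (CUDA GPU, Triton installed):

import torch

ref = Network(input_size=4, nb_action=4).cuda()
opt = NetworkNew(input_size=4, nb_action=4).cuda()
opt.load_state_dict(ref.state_dict())      # identical parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), opt(x), atol=1e-5)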
FeedForwardNN
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class FeedForwardNN(nn.Module): """ A standard in_dim-64-64-out_dim Feed Forward Neural Network. """ def __init__(self, in_dim, out_dim): """ Initialize the network and set up the layers. Parameters: in_dim - input dimensions as an int out_dim - output dimensions as an int Return: None """ super(FeedForwardNN, self).__init__() self.layer1 = nn.Linear(in_dim, 64) self.layer2 = nn.Linear(64, 64) self.layer3 = nn.Linear(64, out_dim) def forward(self, obs): """ Runs a forward pass on the neural network. Parameters: obs - observation to pass as input Return: output - the output of our forward pass """ if isinstance(obs, np.ndarray): obs = torch.tensor(obs, dtype=torch.float) activation1 = F.relu(self.layer1(obs)) activation2 = F.relu(self.layer2(activation1)) output = self.layer3(activation2) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_3, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3, primals_5, buf5, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 64), (64, 1), 0), primals_6, buf5, primals_4, buf6 class FeedForwardNNNew(nn.Module): """ A standard in_dim-64-64-out_dim Feed Forward Neural Network. """ def __init__(self, in_dim, out_dim): """ Initialize the network and set up the layers. 
Parameters: in_dim - input dimensions as an int out_dim - output dimensions as an int Return: None """ super(FeedForwardNNNew, self).__init__() self.layer1 = nn.Linear(in_dim, 64) self.layer2 = nn.Linear(64, 64) self.layer3 = nn.Linear(64, out_dim) def forward(self, input_0): primals_2 = self.layer1.weight primals_3 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_6 = self.layer3.weight primals_7 = self.layer3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
chenjun-110/WZCQ
FeedForwardNN
false
1,671
[ "Apache-2.0" ]
0
e2de7743ad671e8632cfa084638555d7f1deb42f
https://github.com/chenjun-110/WZCQ/tree/e2de7743ad671e8632cfa084638555d7f1deb42f
MLPEncoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class MLPEncoder(nn.Module):

    def __init__(self, d_in, d_out):
        super(MLPEncoder, self).__init__()
        H1 = 100
        H2 = 10
        self._d_in = d_in
        self._d_out = d_out
        self.l1 = nn.Linear(d_in, H1)
        self.l11 = nn.Linear(H1, H2)
        self.l2 = nn.Linear(H2, d_out)

    def forward(self, input):
        x = self.l1(input)
        x = F.relu(x)
        x = self.l11(x)
        x = F.relu(x)
        x = self.l2(x)
        x = torch.tanh(x)
        return x

    def input_size(self):
        return self._d_in

    def output_size(self):
        return self._d_out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_in': 4, 'd_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 100), (100, 1)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (4, 10), (10, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1, primals_2, buf7, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 10), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 
1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(640)](buf3,
            primals_5, buf6, 640, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 10), (10, 1), 0),
            reinterpret_tensor(primals_6, (10, 4), (1, 10), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
    return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0
        ), reinterpret_tensor(buf3, (64, 10), (10, 1), 0
        ), buf5, primals_6, buf6, primals_4, buf7


class MLPEncoderNew(nn.Module):

    def __init__(self, d_in, d_out):
        super(MLPEncoderNew, self).__init__()
        H1 = 100
        H2 = 10
        self._d_in = d_in
        self._d_out = d_out
        self.l1 = nn.Linear(d_in, H1)
        self.l11 = nn.Linear(H1, H2)
        self.l2 = nn.Linear(H2, d_out)

    def input_size(self):
        return self._d_in

    def output_size(self):
        return self._d_out

    def forward(self, input_0):
        primals_1 = self.l1.weight
        primals_2 = self.l1.bias
        primals_4 = self.l11.weight
        primals_5 = self.l11.bias
        primals_6 = self.l2.weight
        primals_7 = self.l2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
cheng-xie/motionEncode
MLPEncoder
false
1,672
[ "MIT" ]
0
fa2152b3eaf2e09ad9477d054566db0a7bc4c7b4
https://github.com/cheng-xie/motionEncode/tree/fa2152b3eaf2e09ad9477d054566db0a7bc4c7b4
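With the accessor fix above (storing _d_in and _d_out in __init__), the size helpers no longer raise AttributeError; a two-line check:

enc = MLPEncoder(d_in=4, d_out=4)
assert enc.input_size() == 4 and enc.output_size() == 4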
posFFN1d
import torch from torch import nn class posFFN1d(nn.Module): def __init__(self, d_hid, d_inner_hid, window=1, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_hid, d_inner_hid, kernel_size=window) self.relu = nn.ReLU() self.w_2 = nn.Conv1d(d_inner_hid, d_hid, kernel_size=window) self.layer_norm = nn.LayerNorm(d_hid) self.dropout = nn.Dropout(dropout) def forward(self, x): out = self.w_1(x) out = self.relu(out) out = self.w_2(out) out = self.dropout(out) return self.layer_norm(out + x) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_hid': 4, 'd_inner_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4 ), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(4)](buf3, primals_3, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf3, primals_3, buf4, buf5, primals_6, primals_7, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del buf5 del primals_7 return (buf6, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0), buf3, buf7) class posFFN1dNew(nn.Module): def __init__(self, d_hid, d_inner_hid, window=1, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_hid, d_inner_hid, kernel_size=window) self.relu = nn.ReLU() self.w_2 = nn.Conv1d(d_inner_hid, d_hid, kernel_size=window) self.layer_norm = nn.LayerNorm(d_hid) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.w_1.weight primals_2 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
chaitanya2334/lsm
posFFN1d
false
1,673
[ "MIT" ]
0
504c732238b419cd77e7e0a97af040778ee9c7dd
https://github.com/chaitanya2334/lsm/tree/504c732238b419cd77e7e0a97af040778ee9c7dd
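Because posFFN1d contains nn.Dropout, the traced call() corresponds to inference only, so the eager reference must be put in eval mode before comparing. A sketch (CUDA assumed; matching parameter names let load_state_dict share weights):

import torch

ref = posFFN1d(d_hid=4, d_inner_hid=4).cuda().eval()
opt = posFFN1dNew(d_hid=4, d_inner_hid=4).cuda().eval()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, device='cuda')        # unbatched (C, L), as in get_inputs()
assert torch.allclose(ref(x), opt(x), atol=1e-5)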
BaseModel
import torch from torchvision.transforms import * import torch.nn as nn import torch.nn.functional as F class BaseModel(nn.Module): def __init__(self, num_classes): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=1) self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1) self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.25) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(128, num_classes) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = self.conv3(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout2(x) x = self.avgpool(x) x = x.view(-1, 128) return self.fc(x) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torchvision.transforms import * import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 430592 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 28 x2 = xindex // 1792 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7168 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7168 * x2), None) tmp3 = tl.load(in_ptr0 + (3584 + x0 + 128 * x1 + 7168 * x2), None) tmp5 = tl.load(in_ptr0 + (3648 + x0 + 128 * x1 + 7168 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 % 13 x2 = xindex // 1664 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 6656 * x2), xmask) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 6656 * x2), xmask) tmp7 = tl.load(in_ptr0 + (3328 + x0 + 256 * x1 + 6656 * x2), xmask) tmp12 = tl.load(in_ptr0 + (3456 + x0 + 256 * x1 + 6656 * 
x2), xmask) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_red_fused_max_pool2d_with_indices_mean_9(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 85 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x1 = xindex // 128 % 2 x0 = xindex % 128 x2 = xindex // 256 _tmp13 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = r3 + 85 * x1 tmp1 = tl.full([1, 1], 169, tl.int32) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x0 + 256 * ((r3 + 85 * x1) % 13) + 6656 * ((r3 + 85 * x1) // 13 % 13) + 86528 * x2), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * ((r3 + 85 * x1) % 13) + 6656 * ((r3 + 85 * x1) // 13 % 13) + 86528 * x2), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.load(in_ptr0 + (3328 + x0 + 256 * ((r3 + 85 * x1) % 13) + 6656 * ((r3 + 85 * x1) // 13 % 13) + 86528 * x2), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tl.load(in_ptr0 + (3456 + x0 + 256 * ((r3 + 85 * x1) % 13) + 6656 * ((r3 + 85 * x1) // 13 % 13) + 86528 * x2), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = _tmp13 + tmp12 _tmp13 = tl.where(rmask & xmask, tmp14, _tmp13) tmp13 = tl.sum(_tmp13, 1)[:, None] tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_per_fused_max_pool2d_with_indices_mean_10(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 512 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 128 x1 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 256 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 169.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (32, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (4, 128), (128, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 7, 7), (147, 1, 21, 3), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(96, 49)](primals_1, buf0, 96, 49, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 58, 58), (107648, 1, 1856, 32)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(430592)](buf5, primals_2, 430592, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 56, 56), (200704, 1, 3584, 64)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(802816)](buf7, primals_5, 802816, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 64, 28, 28), (50176, 1, 1792, 64), torch.float32) buf9 = empty_strided_cuda((4, 64, 28, 28), (50176, 1, 1792, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_6[grid(200704)](buf7, buf8, buf9, 200704, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf8, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 26, 26), (86528, 1, 3328, 128)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_7[grid(346112)](buf11, primals_7, 346112, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf12 = empty_strided_cuda((4, 128, 13, 13), (21632, 1, 1664, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(86528)](buf11, buf12, 86528, XBLOCK=512, num_warps=8, num_stages=1) buf13 = empty_strided_cuda((4, 128, 1, 1, 2), (256, 1, 1024, 1024, 128), torch.float32) triton_red_fused_max_pool2d_with_indices_mean_9[grid(1024)](buf11, buf13, 1024, 85, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf15 = buf14 del buf14 triton_per_fused_max_pool2d_with_indices_mean_10[grid(512)](buf15, buf13, 512, 2, XBLOCK=256, num_warps=4, num_stages=1) del buf13 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf15, (4, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf16) del primals_9 return (buf16, buf0, buf1, buf2, buf3, buf5, buf7, buf8, buf9, buf11, buf12, reinterpret_tensor(buf15, (4, 128), (128, 1), 0), primals_8) class BaseModelNew(nn.Module): def __init__(self, num_classes): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=1) self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1) self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1) self.dropout1 = nn.Dropout(0.25) 
self.dropout2 = nn.Dropout(0.25) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(128, num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc.weight primals_9 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
bsm8734/BC_stage1_Image_Classification
BaseModel
false
1,674
[ "MIT" ]
0
f915a6fb6748bd9041b1dc2e917d732e202e9cc3
https://github.com/bsm8734/BC_stage1_Image_Classification/tree/f915a6fb6748bd9041b1dc2e917d732e202e9cc3
LinearCombine
import torch import torch.nn as nn import torch.nn.functional as F class LinearCombine(nn.Module): def __init__(self, layers_num, trainable=True, input_aware=False, word_level=False): super(LinearCombine, self).__init__() self.input_aware = input_aware self.word_level = word_level if input_aware: raise NotImplementedError('Input aware is not supported.') self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 / layers_num), requires_grad=trainable) def forward(self, seq): nw = F.softmax(self.w, dim=0) seq = torch.mul(seq, nw) seq = torch.sum(seq, dim=0) return seq def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'layers_num': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (64 + x0), xmask) tmp10 = tl.load(in_ptr0 + (128 + x0), xmask) tmp13 = tl.load(in_ptr0 + (192 + x0), xmask) tmp3 = tmp2 - tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp4 / tmp4 tmp6 = tmp0 * tmp5 tmp8 = tmp7 * tmp5 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp5 tmp12 = tmp9 + tmp11 tmp14 = tmp13 * tmp5 tmp15 = tmp12 + tmp14 tl.store(out_ptr0 + x0, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0[grid(64)](primals_2, primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf0, primals_1, primals_2 class LinearCombineNew(nn.Module): def __init__(self, layers_num, trainable=True, input_aware=False, word_level=False): super(LinearCombineNew, self).__init__() self.input_aware = input_aware self.word_level = word_level if input_aware: raise NotImplementedError('Input aware is not supported.') self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 / layers_num), requires_grad=trainable) def forward(self, input_0): primals_1 = self.w primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
charliemorning/mlws
LinearCombine
false
1,675
[ "MIT" ]
0
8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
https://github.com/charliemorning/mlws/tree/8e9bad59ca9f5e774cc1ae7fe454ff3b8a8e1784
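With layers_num=1 the softmax over a single weight is identically 1 (the fused kernel hard-codes this: tmp5 = exp(w - w) / exp(w - w)), so the module degenerates to a plain sum over dim 0. A quick check (CUDA assumed):

import torch

ref = LinearCombine(layers_num=1).cuda()
opt = LinearCombineNew(layers_num=1).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), opt(x))
assert torch.allclose(ref(x), x.sum(dim=0))  # softmax of one weight is exactly 1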
autoencoder
import torch
import torch.nn.functional as F


class autoencoder(torch.nn.Module):

    def __init__(self, inputDim):
        """
        In the constructor we instantiate the ten nn.Linear modules of the
        encoder and decoder (with an 8-unit bottleneck code) and assign
        them as member variables.
        """
        h_size = 128
        c_size = 8
        use_bias = True
        super(autoencoder, self).__init__()
        self.linear1 = torch.nn.Linear(inputDim, h_size, bias=use_bias)
        self.linear2 = torch.nn.Linear(h_size, h_size, bias=use_bias)
        self.linear3 = torch.nn.Linear(h_size, h_size, bias=use_bias)
        self.linear4 = torch.nn.Linear(h_size, h_size, bias=use_bias)
        self.linear5 = torch.nn.Linear(h_size, c_size, bias=use_bias)
        self.linear6 = torch.nn.Linear(c_size, h_size, bias=use_bias)
        self.linear7 = torch.nn.Linear(h_size, h_size, bias=use_bias)
        self.linear8 = torch.nn.Linear(h_size, h_size, bias=use_bias)
        self.linear9 = torch.nn.Linear(h_size, h_size, bias=use_bias)
        self.linear10 = torch.nn.Linear(h_size, inputDim, bias=use_bias)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and we must
        return a Tensor of output data. We can use Modules defined in the
        constructor as well as arbitrary operators on Tensors.
        """
        h = self.linear1(x)
        h = F.relu(h)
        h = self.linear2(h)
        h = F.relu(h)
        h = self.linear3(h)
        h = F.relu(h)
        h = self.linear4(h)
        h = F.relu(h)
        h = self.linear5(h)
        h = F.relu(h)
        h = self.linear6(h)
        h = F.relu(h)
        h = self.linear7(h)
        h = F.relu(h)
        h = self.linear8(h)
        h = F.relu(h)
        h = self.linear9(h)
        h = F.relu(h)
        h = self.linear10(h)
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inputDim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128), (128, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128), (128, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (8, 128), (128, 1)) assert_size_stride(primals_11, (8,), (1,)) assert_size_stride(primals_12, (128, 8), (8, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128), (128, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128), (128, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128), (128, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (4, 128), (128, 1)) assert_size_stride(primals_21, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf27 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf27, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf26 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3, primals_5, buf26, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 128), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf4 buf25 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf5, primals_7, buf25, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf6 buf24 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf7, primals_9, buf24, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (64, 128), (128, 1), 0), reinterpret_tensor(primals_10, (128, 8), (1, 128), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf8 buf23 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(512)](buf9, primals_11, buf23, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 8), (8, 1), 0), reinterpret_tensor(primals_12, (8, 128), (1, 8), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf10 buf22 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf11, primals_13, buf22, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf12 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 128), (128, 1), 0), reinterpret_tensor(primals_14, (128, 128), (1, 128), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf12 buf21 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf13, primals_15, buf21, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf14 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (64, 128), (128, 1), 0), reinterpret_tensor(primals_16, (128, 128), (1, 128), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf14 buf20 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf15, primals_17, buf20, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_17 buf16 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (64, 128), (128, 1), 0), 
reinterpret_tensor(primals_18, (128, 128), (1, 128), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf16 buf19 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf17, primals_19, buf19, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_19 buf18 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_21, reinterpret_tensor(buf17, (64, 128 ), (128, 1), 0), reinterpret_tensor(primals_20, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf18) del primals_21 return (reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(buf7, (64, 128), (128, 1), 0), reinterpret_tensor(buf9, (64, 8), (8, 1), 0), reinterpret_tensor( buf11, (64, 128), (128, 1), 0), reinterpret_tensor(buf13, (64, 128), (128, 1), 0), reinterpret_tensor(buf15, (64, 128), (128, 1), 0), reinterpret_tensor(buf17, (64, 128), (128, 1), 0), primals_20, buf19, primals_18, buf20, primals_16, buf21, primals_14, buf22, primals_12, buf23, primals_10, buf24, primals_8, buf25, primals_6, buf26, primals_4, buf27) class autoencoderNew(torch.nn.Module): def __init__(self, inputDim): """ In the constructor we instantiate two nn.Linear modules and assign them as member variables. """ h_size = 128 c_size = 8 use_bias = True super(autoencoderNew, self).__init__() self.linear1 = torch.nn.Linear(inputDim, h_size, bias=use_bias) self.linear2 = torch.nn.Linear(h_size, h_size, bias=use_bias) self.linear3 = torch.nn.Linear(h_size, h_size, bias=use_bias) self.linear4 = torch.nn.Linear(h_size, h_size, bias=use_bias) self.linear5 = torch.nn.Linear(h_size, c_size, bias=use_bias) self.linear6 = torch.nn.Linear(c_size, h_size, bias=use_bias) self.linear7 = torch.nn.Linear(h_size, h_size, bias=use_bias) self.linear8 = torch.nn.Linear(h_size, h_size, bias=use_bias) self.linear9 = torch.nn.Linear(h_size, h_size, bias=use_bias) self.linear10 = torch.nn.Linear(h_size, inputDim, bias=use_bias) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_8 = self.linear4.weight primals_9 = self.linear4.bias primals_10 = self.linear5.weight primals_11 = self.linear5.bias primals_12 = self.linear6.weight primals_13 = self.linear6.bias primals_14 = self.linear7.weight primals_15 = self.linear7.bias primals_16 = self.linear8.weight primals_17 = self.linear8.bias primals_18 = self.linear9.weight primals_19 = self.linear9.bias primals_20 = self.linear10.weight primals_21 = self.linear10.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
charlesmackin/tiny
autoencoder
false
1,676
[ "Apache-2.0" ]
0
bf8afc5cfc15e12efdd3bca0d559adfdfc435981
https://github.com/charlesmackin/tiny/tree/bf8afc5cfc15e12efdd3bca0d559adfdfc435981
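A quick way to exercise the generated wrapper above is a shape-level smoke test. The sketch below is illustrative only: it assumes the autoencoderNew class and its call() are importable as written and that a CUDA device is available, since the inductor output pins device 0. The input shape is the one call() asserts for primals_3.

import torch

model = autoencoderNew(inputDim=4).cuda()    # ten stacked Linear layers, 128/8 hidden sizes
x = torch.rand([4, 4, 4, 4], device='cuda')  # shape call() asserts for the input
with torch.no_grad():
    y = model(x)
print(y.shape)  # expected: torch.Size([4, 4, 4, 4])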
MLPDecoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class MLPDecoder(nn.Module):

    def __init__(self, d_in, d_out):
        super(MLPDecoder, self).__init__()
        H1 = 10
        H2 = 100
        self._d_in = d_in
        self._d_out = d_out
        self.l1 = nn.Linear(d_in, H1)
        self.l11 = nn.Linear(H1, H2)
        self.l2 = nn.Linear(H2, d_out)

    def forward(self, input):
        x = self.l1(input)
        x = F.relu(x)
        x = self.l11(x)
        x = F.relu(x)
        x = self.l2(x)
        return x

    def input_size(self):
        return self._d_in

    def output_size(self):
        return self._d_out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_in': 4, 'd_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 10), (10, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, primals_2, buf6, 640, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 100), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(6400)](buf3, primals_5, buf5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 
1), 0 ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor( buf3, (64, 100), (100, 1), 0), primals_6, buf5, primals_4, buf6 class MLPDecoderNew(nn.Module): def __init__(self, d_in, d_out): super(MLPDecoderNew, self).__init__() H1 = 10 H2 = 100 self._d_in = d_in self._d_out = d_out self.l1 = nn.Linear(d_in, H1) self.l11 = nn.Linear(H1, H2) self.l2 = nn.Linear(H2, d_out) def input_size(self): return self._d_in def output_size(self): return self._d_out def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l11.weight primals_5 = self.l11.bias primals_6 = self.l2.weight primals_7 = self.l2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
cheng-xie/motionEncode
MLPDecoder
false
1,677
[ "MIT" ]
0
fa2152b3eaf2e09ad9477d054566db0a7bc4c7b4
https://github.com/cheng-xie/motionEncode/tree/fa2152b3eaf2e09ad9477d054566db0a7bc4c7b4
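Because the reference module and the generated MLPDecoderNew expose identical parameter names (l1, l11, l2), their outputs can be compared directly. A minimal equivalence sketch, assuming both classes plus the record's get_inputs helper are importable and a CUDA device is present; the tolerance is a guess, since fused kernels reorder floating-point work:

import torch

ref = MLPDecoder(4, 4).cuda()
fused = MLPDecoderNew(4, 4).cuda()
fused.load_state_dict(ref.state_dict())  # same parameter names, so this maps 1:1
x, = get_inputs()
x = x.cuda()
with torch.no_grad():
    assert torch.allclose(ref(x), fused(x), atol=1e-5)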
AE
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.data


class AE(nn.Module):
    """ Class for the AE using Fully Connected """

    def __init__(self, opt):
        super().__init__()
        assert opt.isize % 4 == 0, 'input size has to be a multiple of 4'
        self.dense1 = nn.Linear(opt.isize, opt.isize)
        self.dense2 = nn.Linear(int(opt.isize / 2), int(opt.isize / 2))
        self.dense3 = nn.Linear(int(opt.isize / 4), int(opt.isize / 4))
        self.pool = nn.MaxPool1d(2, padding=0)
        self.up = nn.Upsample(scale_factor=2)
        self.dense4 = nn.Linear(int(opt.isize / 2), int(opt.isize / 2))
        self.dense5 = nn.Linear(opt.isize, opt.isize)

    def forward(self, x):
        x1 = self.dense1(x)
        x1 = nn.ReLU()(x1)
        x2 = self.pool(torch.unsqueeze(x1, 0))
        x2 = torch.squeeze(x2, 0)
        x2 = self.dense2(x2)
        x2 = nn.ReLU()(x2)
        x3 = self.pool(torch.unsqueeze(x2, 0))
        x3 = torch.squeeze(x3, 0)
        encoded = self.dense3(x3)
        encoded = nn.ReLU()(encoded)
        y = self.up(torch.unsqueeze(encoded, 1))
        y = torch.squeeze(y, 1)
        y = x2 + y
        y = self.dense4(y)
        y = nn.ReLU()(y)
        y = self.up(torch.unsqueeze(y, 1))
        y = torch.squeeze(y, 1)
        decoded = self.dense5(y)
        decoded = nn.Tanh()(decoded)
        return decoded


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'opt': _mock_config(isize=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = xindex // 2 tmp0 = 
tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + 0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp1 = x0 tmp2 = tmp1.to(tl.float32) tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp4.to(tl.int32) tmp9 = tmp6 + tmp8 tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tmp0 + tmp11 tmp13 = 0.0 tmp14 = tmp0 <= tmp13 tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = tl.load(in_ptr0 + (tmp4 + 2 * x1), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + tmp4, xmask, eviction_policy='evict_last') tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_tanh_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (2, 2), (2, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (2, 2), (2, 1)) assert_size_stride(primals_9, (2,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) 
assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 buf19 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((1, 4, 1, 2), (8, 2, 2, 1), torch.int8) buf3 = empty_strided_cuda((1, 4, 1, 2), (8, 2, 2, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_1[grid(8)](buf1, buf2, buf3, 8, XBLOCK=8, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 2), (1, 2), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(8)](buf5, primals_5, 8, XBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.int8) buf7 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(4)](buf5, buf6, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 1), (1, 0), 0), primals_6, out=buf8) buf9 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(2)](buf9, 2, XBLOCK =2, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 2), (2, 1), torch.float32) buf18 = empty_strided_cuda((4, 2), (2, 1), torch.bool) triton_poi_fused_add_threshold_backward_5[grid(8)](buf5, buf8, primals_7, buf10, buf18, 8, XBLOCK=8, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (2, 2), (1, 2), 0), out=buf11) buf12 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_6[grid(4)](buf12, 4, XBLOCK=4, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) triton_poi_fused__unsafe_index_7[grid(16)](buf11, primals_9, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (4, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf14) buf15 = buf14 del buf14 triton_poi_fused_tanh_8[grid(16)](buf15, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 buf16 = empty_strided_cuda((4, 2), (2, 1), torch.bool) triton_poi_fused_relu_threshold_backward_9[grid(8)](buf11, primals_9, buf16, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf11 del primals_9 buf17 = empty_strided_cuda((4, 1), (1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_10[grid(4)](buf8, primals_7, buf17, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf8 del primals_7 return (buf15, primals_3, reinterpret_tensor(buf1, (1, 4, 1, 4), (16, 4, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 2), (2, 1), 0), reinterpret_tensor(buf5, (1, 4, 1, 2), (8, 2, 2, 1), 0), buf6, reinterpret_tensor(buf7, (4, 1), (1, 1), 0), buf9, buf10, buf12, reinterpret_tensor(buf13, (4, 4), (4, 1), 0), buf15, primals_10, buf16, primals_8, buf17, primals_6, buf18, primals_4, buf19) class AENew(nn.Module): """ Class for the AE using Fully Connected """ def __init__(self, opt): super().__init__() assert opt.isize % 4 == 0, 'input size has to be a 
multiple of 4' self.dense1 = nn.Linear(opt.isize, opt.isize) self.dense2 = nn.Linear(int(opt.isize / 2), int(opt.isize / 2)) self.dense3 = nn.Linear(int(opt.isize / 4), int(opt.isize / 4)) self.pool = nn.MaxPool1d(2, padding=0) self.up = nn.Upsample(scale_factor=2) self.dense4 = nn.Linear(int(opt.isize / 2), int(opt.isize / 2)) self.dense5 = nn.Linear(opt.isize, opt.isize) def forward(self, input_0): primals_1 = self.dense1.weight primals_2 = self.dense1.bias primals_4 = self.dense2.weight primals_5 = self.dense2.bias primals_6 = self.dense3.weight primals_7 = self.dense3.bias primals_8 = self.dense4.weight primals_9 = self.dense4.bias primals_3 = self.dense5.weight primals_11 = self.dense5.bias primals_10 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
cerbero94/GAN_CP
AE
false
1,678
[ "MIT" ]
0
e255f5f5b3733c55d47997c1ffc4161529701f8a
https://github.com/cerbero94/GAN_CP/tree/e255f5f5b3733c55d47997c1ffc4161529701f8a
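One caveat worth flagging in this row: AENew.forward assigns primals_3 = self.dense5.weight and primals_10 = input_0, while call() feeds primals_3 into the first mm against dense1.weight and primals_10 into the final mm before the tanh, i.e. the reverse roles. Both tensors share the (4, 4) shape, so the stride asserts still pass, but the fused module is unlikely to reproduce AE numerically. The sketch below therefore only smoke-tests shapes; _mock_config comes from the paritybench harness, and a plain namespace is assumed here to be an adequate stand-in:

import torch
from types import SimpleNamespace

opt = SimpleNamespace(isize=4)  # hypothetical stand-in for _mock_config(isize=4)
fused = AENew(opt).cuda()
y = fused(torch.rand([4, 4], device='cuda'))
print(y.shape)  # torch.Size([4, 4]); values lie in (-1, 1) after the final Tanh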
my_BinaryCross
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


class my_BinaryCross(nn.Module):

    def __init__(self, args):
        super(my_BinaryCross, self).__init__()
        self.args = args

    def forward(self, output, target, beat):
        modif_beat = 1.0 / torch.exp(beat) * 10
        # NOTE: both masks write 5 / 100, so every entry except an exact 7
        # ends up at 0.05; this mirrors the upstream repository as-is.
        modif_beat[modif_beat < 7] = 5 / 100
        modif_beat[modif_beat > 7] = 5 / 100
        batch_size = len(output)
        len_pred = len(output[0])
        loss = -torch.mean(modif_beat * torch.sum(target.view(batch_size,
            len_pred, -1) * torch.log(output.view(batch_size, len_pred, -1)),
            dim=2))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'args': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl_math.log(tmp1) tmp3 = tmp0 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1( in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp14 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp2 = tl.full([1], 1, tl.int32) tmp3 = tmp2 / tmp1 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = 10.0 tmp7 = tmp5 * tmp6 tmp8 = 7.0 tmp9 = tmp7 < tmp8 tmp10 = 0.05000000074505806 tmp11 = tl.where(tmp9, tmp10, tmp7) tmp12 = tmp11 > tmp8 tmp13 = tl.where(tmp12, tmp10, tmp11) tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 256.0 tmp20 = tmp18 / tmp19 tmp21 = -tmp20 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_log_mul_sum_0[grid(16)](arg2_1, arg1_1, buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del arg2_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1[ grid(1)](buf4, arg0_1, buf2, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf2 return buf4, class my_BinaryCrossNew(nn.Module): def __init__(self, args): super(my_BinaryCrossNew, self).__init__() self.args = args def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
carsault/chord_sequence_prediction
my_BinaryCross
false
1,679
[ "MIT" ]
0
6eb539a963ca6350bcf0c88b8d8756775ad7c488
https://github.com/carsault/chord_sequence_prediction/tree/6eb539a963ca6350bcf0c88b8d8756775ad7c488
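The double masked assignment in the reference forward means modif_beat keeps an original value only where it equals 7 exactly, which torch.rand inputs essentially never produce; the generated kernel bakes in the same pair of tl.where selections, so the two losses should agree. A hedged check, assuming both classes and get_inputs are importable and CUDA is available (args is stored but never read, so None suffices):

import torch

crit_ref = my_BinaryCross(args=None).cuda()
crit_new = my_BinaryCrossNew(args=None).cuda()
output, target, beat = [t.cuda() for t in get_inputs()]
assert torch.allclose(crit_ref(output, target, beat),
                      crit_new(output, target, beat), atol=1e-5)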
GATLayer
from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter


class GATLayer(Module):

    def __init__(self, input_channel, output_channel, use_bias=True):
        super(GATLayer, self).__init__()
        self.use_bias = use_bias
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.kernel = Parameter(torch.FloatTensor(input_channel,
            output_channel))
        torch.nn.init.xavier_uniform_(self.kernel, gain=1.414)
        if self.use_bias:
            self.bias = Parameter(torch.FloatTensor(output_channel))
            torch.nn.init.constant_(self.bias, 0)
        self.attn_kernel_self = Parameter(torch.FloatTensor(output_channel, 1))
        torch.nn.init.xavier_uniform_(self.attn_kernel_self, gain=1.414)
        self.attn_kernel_neighs = Parameter(torch.FloatTensor(
            output_channel, 1))
        torch.nn.init.xavier_uniform_(self.attn_kernel_neighs, gain=1.414)

    def forward(self, X, adj):
        features = torch.mm(X, self.kernel)
        attn_for_self = torch.mm(features, self.attn_kernel_self)
        attn_for_neigh = torch.mm(features, self.attn_kernel_neighs)
        attn = attn_for_self + attn_for_neigh.t()
        attn = F.leaky_relu(attn, negative_slope=0.2)
        mask = -10000000000.0 * torch.ones_like(attn)
        attn = torch.where(adj > 0, attn, mask)
        attn = torch.softmax(attn, dim=-1)
        attn = F.dropout(attn, p=0.5, training=self.training)
        features = F.dropout(features, p=0.5, training=self.training)
        node_features = torch.mm(attn, features)
        if self.use_bias:
            node_features = node_features + self.bias
        return node_features


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_channel': 4, 'output_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn.modules.module import Module from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.load(in_ptr3 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr3 + 1) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp22 = tl.load(in_ptr3 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = 0.2 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp1, tmp5, tmp7) tmp9 = -10000000000.0 tmp10 = tl.where(tmp0, tmp8, tmp9) tmp15 = tmp2 + tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = tl.where(tmp11, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp24 = tmp2 + tmp23 tmp25 = tmp24 * tmp6 tmp26 = tl.where(tmp21, tmp24, tmp25) tmp27 = tl.where(tmp20, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp33 = tmp2 + tmp32 tmp34 = tmp33 * tmp6 tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tl.store(out_ptr0 + x0, tmp37, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, 
xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 0.2 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -10000000000.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_3, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_gt_1[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4, buf3, buf1, buf2, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4, buf3, buf1, buf2, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf2 del buf5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 extern_kernels.addmm(primals_6, buf7, buf0, alpha=1, beta=1, out=buf8) del primals_6 return buf8, buf3, buf4, buf7, reinterpret_tensor(buf0, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0 ), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GATLayerNew(Module): def __init__(self, input_channel, output_channel, use_bias=True): super(GATLayerNew, self).__init__() self.use_bias = use_bias 
self.input_channel = input_channel self.output_channel = output_channel self.kernel = Parameter(torch.FloatTensor(input_channel, output_channel)) torch.nn.init.xavier_uniform_(self.kernel, gain=1.414) if self.use_bias: self.bias = Parameter(torch.FloatTensor(output_channel)) torch.nn.init.constant_(self.bias, 0) self.attn_kernel_self = Parameter(torch.FloatTensor(output_channel, 1)) torch.nn.init.xavier_uniform_(self.attn_kernel_self, gain=1.414) self.attn_kernel_neighs = Parameter(torch.FloatTensor( output_channel, 1)) torch.nn.init.xavier_uniform_(self.attn_kernel_neighs, gain=1.414) def forward(self, input_0, input_1): primals_1 = self.kernel primals_6 = self.bias primals_3 = self.attn_kernel_self primals_4 = self.attn_kernel_neighs primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
chengsilin/Graph_model
GATLayer
false
1,680
[ "MIT" ]
0
0d9714a8b02196fabf5b0ecd0680b7269a22c53b
https://github.com/chengsilin/Graph_model/tree/0d9714a8b02196fabf5b0ecd0680b7269a22c53b
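The call() graph above contains no dropout kernels: both F.dropout calls from the reference were traced away, so GATLayerNew can only match GATLayer when the modules run in eval() mode. A comparison sketch under that assumption; identical parameter names make load_state_dict a direct copy, and the random adj from get_inputs is almost surely positive everywhere, so the -1e10 mask rarely fires:

import torch

ref = GATLayer(4, 4).cuda().eval()    # eval() turns the p=0.5 dropouts into no-ops
fused = GATLayerNew(4, 4).cuda().eval()
fused.load_state_dict(ref.state_dict())
X, adj = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    assert torch.allclose(ref(X, adj), fused(X, adj), atol=1e-5)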
FeedForward
import math
import torch
import numpy as np
import torch.nn as nn


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))


class 全连接层(nn.Module):

    def __init__(self, 输入_接口, 输出_接口):
        super().__init__()
        np.random.seed(1)
        self.weight = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 /
            np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), (输入_接口, 输出_接口))))
        self.bias = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 /
            np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), 输出_接口)))

    def forward(self, x):
        输出 = torch.matmul(x, self.weight)
        输出 = 输出 + self.bias
        return 输出


class FeedForward(nn.Module):

    def __init__(self, d_model, d_ff=2048, dropout=0.1):
        super().__init__()
        self.linear_1 = 全连接层(d_model, d_ff)
        self.dropout = nn.Dropout(dropout)
        self.linear_2 = 全连接层(d_ff, d_model)

    def forward(self, x):
        x = self.dropout(gelu(self.linear_1(x)))
        x = self.linear_2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp2 * tmp2 tmp6 = tmp5 * tmp2 tmp7 = 0.044715 tmp8 = tmp6 * tmp7 tmp9 = tmp2 + tmp8 tmp10 = 0.7978845608028654 tmp11 = tmp9 * tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = 1.0 tmp14 = tmp12 + tmp13 tmp15 = tmp4 * tmp14 tl.store(out_ptr0 + x2, tmp15, None) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 2048), (2048, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (2048,), (1,)) assert_size_stride(primals_4, (2048, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(131072)](buf0, primals_3, buf1, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0 ), primals_4, out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_3, buf0, reinterpret_tensor(buf1, (2048, 64), (1, 2048), 0), reinterpret_tensor(primals_4, (4, 2048), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class 全连接层(nn.Module): def __init__(self, 输入_接口, 输出_接口): super().__init__() np.random.seed(1) self.weight = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 / np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), (输入_接口, 输出_接口)))) self.bias = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 / np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), 输出_接口))) def forward(self, x): 输出 = torch.matmul(x, self.weight) 输出 = 输出 + self.bias return 输出 class FeedForwardNew(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.1): super().__init__() self.linear_1 
= 全连接层(d_model, d_ff) self.dropout = nn.Dropout(dropout) self.linear_2 = 全连接层(d_ff, d_model) def forward(self, input_0): primals_1 = self.linear_1.weight primals_3 = self.linear_1.bias primals_4 = self.linear_2.weight primals_5 = self.linear_2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
chenjun-110/WZCQ
FeedForward
false
1,681
[ "Apache-2.0" ]
0
e2de7743ad671e8632cfa084638555d7f1deb42f
https://github.com/chenjun-110/WZCQ/tree/e2de7743ad671e8632cfa084638555d7f1deb42f
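A quirk of this row makes verification easy: 全连接层.__init__ calls np.random.seed(1) before drawing each weight, so two freshly built FeedForward instances start from identical parameters and no state_dict copy is needed. FeedForwardNew also never invokes self.dropout, so the comparison below puts both modules in eval(), where the reference's p=0.1 dropout is a no-op anyway; the tolerance is a guess for the fused tanh-GELU arithmetic:

import torch

ref = FeedForward(4).cuda().eval()
fused = FeedForwardNew(4).cuda().eval()
x, = get_inputs()
x = x.cuda()
with torch.no_grad():
    assert torch.allclose(ref(x), fused(x), atol=1e-4)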
L1Part
import torch
import torch.nn as nn
from itertools import chain as chain
import torch.utils.data
from collections import OrderedDict
import torch.hub
import torch.nn.parallel
import torch.optim


class concatLayer(nn.Module):

    def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
        super(concatLayer, self).__init__()
        self.firstSub = self.concatLayerSub(in_channels,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
        self.secondSub = self.concatLayerSub(out_channels_perSub,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
        self.thirdSub = self.concatLayerSub(out_channels_perSub,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')

    def forward(self, x):
        firstSub = self.firstSub(x)
        secondSub = self.secondSub(firstSub)
        thirdSub = self.thirdSub(secondSub)
        out = torch.cat([firstSub, secondSub, thirdSub], 1)
        return out

    def concatLayerSub(self, in_channels, out_channels, layerName):
        concatLayerSubOrdered = OrderedDict()
        conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
        concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
            out_channels)})
        return nn.Sequential(concatLayerSubOrdered)


class stage(nn.Module):

    def __init__(self, stageID, in_channels, out_channels_perSub,
            mid_channels, out_channels, appendix):
        super(stage, self).__init__()
        self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
            stageID, appendix)
        self.secondConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 2, stageID, appendix)
        self.thirdConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 3, stageID, appendix)
        self.fourthConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 4, stageID, appendix)
        self.fifthConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 5, stageID, appendix)
        conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
            kernel_size=1, padding=0)
        prelu = nn.PReLU(mid_channels)
        self.afterConcatsFirst = nn.Sequential(OrderedDict({(
            'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
            'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
        conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1,
            padding=0)
        self.afterConcatsSecond = nn.Sequential(OrderedDict({(
            'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))

    def forward(self, x):
        x = self.firstConcat(x)
        x = self.secondConcat(x)
        x = self.thirdConcat(x)
        x = self.fourthConcat(x)
        x = self.fifthConcat(x)
        x = self.afterConcatsFirst(x)
        out = self.afterConcatsSecond(x)
        return out


class L1Part(nn.Module):

    def __init__(self, in_channels, stage_out_channels):
        super(L1Part, self).__init__()
        self.firstStage = stage(0, in_channels, 96, 256,
            stage_out_channels, 'L1')
        self.secondStage = stage(1, in_channels + stage_out_channels, 128,
            512, stage_out_channels, 'L1')

    def forward(self, features, L2Out):
        x = torch.cat([features, L2Out], 1)
        x = self.firstStage(x)
        x = torch.cat([features, x, L2Out], 1)
        out = self.secondStage(x)
        return out


def get_inputs():
    return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'stage_out_channels': 4}]
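Before the generated kernels below, a quick shape sanity check of the reference module helps: features and L2Out concatenate to in_channels (1 + 3 = 4) for the first stage, then features, the stage output, and L2Out concatenate to in_channels + stage_out_channels (1 + 4 + 3 = 8) for the second. A minimal sketch, assuming L1Part and get_inputs are importable and CUDA is available:

import torch

net = L1Part(in_channels=4, stage_out_channels=4).cuda()
features, L2Out = [t.cuda() for t in get_inputs()]  # (4, 1, 4, 4) and (4, 3, 4, 4)
out = net(features, L2Out)
print(out.shape)  # torch.Size([4, 4, 4, 4]); 3x3 pad-1 and 1x1 convs keep H x W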
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import chain as chain import torch.utils.data from collections import OrderedDict import torch.hub import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 288 x0 = xindex % 16 x2 = xindex // 4608 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 96, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1536 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 192, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-96 + x1) + 1536 * x2), tmp9, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 288, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-192 + x1) + 1536 * x2), tmp11, other=0.0) tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = tl.load(in_ptr3 + (-192 + x1), tmp11, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp17 * tmp14 tmp19 = tl.where(tmp16, tmp14, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp10, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x3, tmp23, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = tl.load(in_ptr2 + (-1 + x1), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 8, tl.int64) tmp18 = tl.load(in_ptr3 + (x0 + 16 * (-5 + x1) + 48 * x2), tmp15 & xmask, other=0.0) tmp19 = tl.where(tmp9, tmp14, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 384 x0 = xindex % 16 x2 = xindex // 6144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 2048 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 256, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-128 + x1) + 2048 * x2), tmp9, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 384, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-256 + x1) + 2048 * x2), tmp11, other=0.0) tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = tl.load(in_ptr3 + (-256 + x1), tmp11, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp17 * tmp14 tmp19 = tl.where(tmp16, tmp14, tmp18) 
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp10, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x3, tmp23, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102) = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (96, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (96,), (1,)) assert_size_stride(primals_5, (96,), (1,)) assert_size_stride(primals_6, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_7, (96,), (1,)) assert_size_stride(primals_8, (96,), (1,)) assert_size_stride(primals_9, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_10, (96,), (1,)) assert_size_stride(primals_11, (96,), (1,)) assert_size_stride(primals_12, (96, 288, 3, 3), (2592, 9, 3, 1)) assert_size_stride(primals_13, (96,), (1,)) assert_size_stride(primals_14, (96,), (1,)) assert_size_stride(primals_15, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_16, (96,), (1,)) assert_size_stride(primals_17, (96,), (1,)) assert_size_stride(primals_18, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_19, (96,), (1,)) assert_size_stride(primals_20, (96,), (1,)) assert_size_stride(primals_21, (96, 288, 
3, 3), (2592, 9, 3, 1)) assert_size_stride(primals_22, (96,), (1,)) assert_size_stride(primals_23, (96,), (1,)) assert_size_stride(primals_24, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_25, (96,), (1,)) assert_size_stride(primals_26, (96,), (1,)) assert_size_stride(primals_27, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_28, (96,), (1,)) assert_size_stride(primals_29, (96,), (1,)) assert_size_stride(primals_30, (96, 288, 3, 3), (2592, 9, 3, 1)) assert_size_stride(primals_31, (96,), (1,)) assert_size_stride(primals_32, (96,), (1,)) assert_size_stride(primals_33, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_34, (96,), (1,)) assert_size_stride(primals_35, (96,), (1,)) assert_size_stride(primals_36, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_37, (96,), (1,)) assert_size_stride(primals_38, (96,), (1,)) assert_size_stride(primals_39, (96, 288, 3, 3), (2592, 9, 3, 1)) assert_size_stride(primals_40, (96,), (1,)) assert_size_stride(primals_41, (96,), (1,)) assert_size_stride(primals_42, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_43, (96,), (1,)) assert_size_stride(primals_44, (96,), (1,)) assert_size_stride(primals_45, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_46, (96,), (1,)) assert_size_stride(primals_47, (96,), (1,)) assert_size_stride(primals_48, (256, 288, 1, 1), (288, 1, 1, 1)) assert_size_stride(primals_49, (256,), (1,)) assert_size_stride(primals_50, (256,), (1,)) assert_size_stride(primals_51, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_52, (4,), (1,)) assert_size_stride(primals_53, (128, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_54, (128,), (1,)) assert_size_stride(primals_55, (128,), (1,)) assert_size_stride(primals_56, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_57, (128,), (1,)) assert_size_stride(primals_58, (128,), (1,)) assert_size_stride(primals_59, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_60, (128,), (1,)) assert_size_stride(primals_61, (128,), (1,)) assert_size_stride(primals_62, (128, 384, 3, 3), (3456, 9, 3, 1)) assert_size_stride(primals_63, (128,), (1,)) assert_size_stride(primals_64, (128,), (1,)) assert_size_stride(primals_65, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_66, (128,), (1,)) assert_size_stride(primals_67, (128,), (1,)) assert_size_stride(primals_68, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_69, (128,), (1,)) assert_size_stride(primals_70, (128,), (1,)) assert_size_stride(primals_71, (128, 384, 3, 3), (3456, 9, 3, 1)) assert_size_stride(primals_72, (128,), (1,)) assert_size_stride(primals_73, (128,), (1,)) assert_size_stride(primals_74, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_75, (128,), (1,)) assert_size_stride(primals_76, (128,), (1,)) assert_size_stride(primals_77, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_78, (128,), (1,)) assert_size_stride(primals_79, (128,), (1,)) assert_size_stride(primals_80, (128, 384, 3, 3), (3456, 9, 3, 1)) assert_size_stride(primals_81, (128,), (1,)) assert_size_stride(primals_82, (128,), (1,)) assert_size_stride(primals_83, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_84, (128,), (1,)) assert_size_stride(primals_85, (128,), (1,)) assert_size_stride(primals_86, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_87, (128,), (1,)) assert_size_stride(primals_88, (128,), (1,)) assert_size_stride(primals_89, (128, 384, 3, 3), (3456, 9, 3, 1)) assert_size_stride(primals_90, 
(128,), (1,)) assert_size_stride(primals_91, (128,), (1,)) assert_size_stride(primals_92, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_93, (128,), (1,)) assert_size_stride(primals_94, (128,), (1,)) assert_size_stride(primals_95, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_96, (128,), (1,)) assert_size_stride(primals_97, (128,), (1,)) assert_size_stride(primals_98, (512, 384, 1, 1), (384, 1, 1, 1)) assert_size_stride(primals_99, (512,), (1,)) assert_size_stride(primals_100, (512,), (1,)) assert_size_stride(primals_101, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_102, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 96, 4, 4), (1536, 16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf2, primals_4, primals_5, buf3, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 96, 4, 4), (1536, 16, 4, 1)) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf5, primals_7, primals_8, buf6, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 96, 4, 4), (1536, 16, 4, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(6144)](buf8, primals_10, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 buf9 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_3[grid(18432)](buf3, buf6, buf8, primals_11, buf9, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 96, 4, 4), (1536, 16, 4, 1)) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf11, primals_13, primals_14, buf12, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf13 = extern_kernels.convolution(buf12, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 96, 4, 4), (1536, 16, 4, 1)) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf14, primals_16, primals_17, buf15, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_16 buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 96, 4, 4), (1536, 16, 4, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_2[grid(6144)](buf17, primals_19, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf18 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_3[grid(18432)](buf12, buf15, buf17, primals_20, buf18, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 96, 4, 4), (1536, 16, 4, 1)) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf20, primals_22, primals_23, buf21, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_22 buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 96, 4, 4), (1536, 16, 4, 1)) buf23 = buf22 del buf22 buf24 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf23, primals_25, primals_26, buf24, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf25 = extern_kernels.convolution(buf24, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 96, 4, 4), (1536, 16, 4, 1)) buf26 = buf25 del buf25 triton_poi_fused_convolution_2[grid(6144)](buf26, primals_28, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_28 buf27 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_3[grid(18432)](buf21, buf24, buf26, primals_29, buf27, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 96, 4, 4), (1536, 16, 4, 1)) buf29 = buf28 del buf28 buf30 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf29, primals_31, primals_32, buf30, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_31 buf31 = extern_kernels.convolution(buf30, primals_33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 96, 4, 4), (1536, 16, 4, 1)) buf32 = buf31 del buf31 buf33 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf32, primals_34, primals_35, buf33, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_34 buf34 = extern_kernels.convolution(buf33, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 96, 4, 4), (1536, 16, 4, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_2[grid(6144)](buf35, primals_37, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_37 buf36 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_3[grid(18432)](buf30, buf33, buf35, primals_38, buf36, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf37 = extern_kernels.convolution(buf36, primals_39, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 96, 4, 4), (1536, 16, 4, 1)) buf38 = buf37 del buf37 buf39 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf38, primals_40, primals_41, buf39, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_40 buf40 = extern_kernels.convolution(buf39, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 96, 4, 4), (1536, 16, 4, 1)) buf41 = buf40 del buf40 buf42 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf41, primals_43, primals_44, buf42, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_43 buf43 = extern_kernels.convolution(buf42, primals_45, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 96, 4, 4), (1536, 16, 4, 1)) buf44 = buf43 del buf43 triton_poi_fused_convolution_2[grid(6144)](buf44, primals_46, 6144, XBLOCK=256, num_warps=4, num_stages=1) del primals_46 buf45 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_3[grid(18432)](buf39, buf42, buf44, primals_47, buf45, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf46 = extern_kernels.convolution(buf45, primals_48, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 256, 4, 4), (4096, 16, 4, 1)) buf47 = buf46 del buf46 buf48 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_4[grid(16384)](buf47, primals_49, primals_50, buf48, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_49 buf49 = extern_kernels.convolution(buf48, primals_51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf49, (4, 4, 4, 4), (64, 16, 4, 1)) buf50 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_5[grid(512)](primals_1, buf49, primals_52, primals_2, buf50, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf49 del primals_1 del primals_2 del primals_52 buf51 = extern_kernels.convolution(buf50, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 128, 4, 4), (2048, 16, 4, 1)) buf52 = buf51 del buf51 buf53 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf52, primals_54, primals_55, buf53, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_54 buf54 = extern_kernels.convolution(buf53, primals_56, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 128, 4, 4), (2048, 16, 4, 1)) buf55 = buf54 del buf54 buf56 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf55, primals_57, primals_58, buf56, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_57 buf57 = extern_kernels.convolution(buf56, primals_59, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 128, 4, 4), (2048, 16, 4, 1)) buf58 = buf57 del buf57 triton_poi_fused_convolution_7[grid(8192)](buf58, primals_60, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_60 buf59 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch. float32) triton_poi_fused_cat_8[grid(24576)](buf53, buf56, buf58, primals_61, buf59, 24576, XBLOCK=256, num_warps=4, num_stages=1) buf60 = extern_kernels.convolution(buf59, primals_62, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 128, 4, 4), (2048, 16, 4, 1)) buf61 = buf60 del buf60 buf62 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf61, primals_63, primals_64, buf62, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_63 buf63 = extern_kernels.convolution(buf62, primals_65, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf63, (4, 128, 4, 4), (2048, 16, 4, 1)) buf64 = buf63 del buf63 buf65 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf64, primals_66, primals_67, buf65, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_66 buf66 = extern_kernels.convolution(buf65, primals_68, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 128, 4, 4), (2048, 16, 4, 1)) buf67 = buf66 del buf66 triton_poi_fused_convolution_7[grid(8192)](buf67, primals_69, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_69 buf68 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch. float32) triton_poi_fused_cat_8[grid(24576)](buf62, buf65, buf67, primals_70, buf68, 24576, XBLOCK=256, num_warps=4, num_stages=1) buf69 = extern_kernels.convolution(buf68, primals_71, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf69, (4, 128, 4, 4), (2048, 16, 4, 1)) buf70 = buf69 del buf69 buf71 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf70, primals_72, primals_73, buf71, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_72 buf72 = extern_kernels.convolution(buf71, primals_74, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf72, (4, 128, 4, 4), (2048, 16, 4, 1)) buf73 = buf72 del buf72 buf74 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf73, primals_75, primals_76, buf74, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_75 buf75 = extern_kernels.convolution(buf74, primals_77, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf75, (4, 128, 4, 4), (2048, 16, 4, 1)) buf76 = buf75 del buf75 triton_poi_fused_convolution_7[grid(8192)](buf76, primals_78, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_78 buf77 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch. float32) triton_poi_fused_cat_8[grid(24576)](buf71, buf74, buf76, primals_79, buf77, 24576, XBLOCK=256, num_warps=4, num_stages=1) buf78 = extern_kernels.convolution(buf77, primals_80, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf78, (4, 128, 4, 4), (2048, 16, 4, 1)) buf79 = buf78 del buf78 buf80 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf79, primals_81, primals_82, buf80, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_81 buf81 = extern_kernels.convolution(buf80, primals_83, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf81, (4, 128, 4, 4), (2048, 16, 4, 1)) buf82 = buf81 del buf81 buf83 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf82, primals_84, primals_85, buf83, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_84 buf84 = extern_kernels.convolution(buf83, primals_86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 128, 4, 4), (2048, 16, 4, 1)) buf85 = buf84 del buf84 triton_poi_fused_convolution_7[grid(8192)](buf85, primals_87, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_87 buf86 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch. float32) triton_poi_fused_cat_8[grid(24576)](buf80, buf83, buf85, primals_88, buf86, 24576, XBLOCK=256, num_warps=4, num_stages=1) buf87 = extern_kernels.convolution(buf86, primals_89, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf87, (4, 128, 4, 4), (2048, 16, 4, 1)) buf88 = buf87 del buf87 buf89 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf88, primals_90, primals_91, buf89, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_90 buf90 = extern_kernels.convolution(buf89, primals_92, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf90, (4, 128, 4, 4), (2048, 16, 4, 1)) buf91 = buf90 del buf90 buf92 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf91, primals_93, primals_94, buf92, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_93 buf93 = extern_kernels.convolution(buf92, primals_95, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf93, (4, 128, 4, 4), (2048, 16, 4, 1)) buf94 = buf93 del buf93 triton_poi_fused_convolution_7[grid(8192)](buf94, primals_96, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_96 buf95 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch. float32) triton_poi_fused_cat_8[grid(24576)](buf89, buf92, buf94, primals_97, buf95, 24576, XBLOCK=256, num_warps=4, num_stages=1) buf96 = extern_kernels.convolution(buf95, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf96, (4, 512, 4, 4), (8192, 16, 4, 1)) buf97 = buf96 del buf96 buf98 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_9[grid(32768)](buf97, primals_99, primals_100, buf98, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_99 buf99 = extern_kernels.convolution(buf98, primals_101, stride=(1, 1 ), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 4, 4, 4), (64, 16, 4, 1)) buf100 = buf99 del buf99 triton_poi_fused_convolution_10[grid(256)](buf100, primals_102, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_102 return (buf100, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_55, primals_56, primals_58, primals_59, primals_61, primals_62, primals_64, primals_65, primals_67, primals_68, primals_70, primals_71, primals_73, primals_74, primals_76, primals_77, primals_79, primals_80, primals_82, primals_83, primals_85, primals_86, primals_88, primals_89, primals_91, primals_92, primals_94, primals_95, primals_97, primals_98, primals_100, primals_101, buf0, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12, buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24, buf26, buf27, buf29, buf30, buf32, buf33, buf35, buf36, buf38, buf39, buf41, buf42, buf44, buf45, buf47, buf48, buf50, buf52, buf53, buf55, buf56, buf58, buf59, buf61, buf62, buf64, buf65, buf67, buf68, buf70, buf71, buf73, buf74, buf76, buf77, buf79, buf80, buf82, buf83, buf85, buf86, buf88, buf89, buf91, buf92, buf94, buf95, buf97, buf98) class concatLayer(nn.Module): def __init__(self, in_channels, out_channels_perSub, i, j, appendix): super(concatLayer, self).__init__() self.firstSub = self.concatLayerSub(in_channels, out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0') self.secondSub = self.concatLayerSub(out_channels_perSub, out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1') self.thirdSub = self.concatLayerSub(out_channels_perSub, out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2') def forward(self, x): firstSub = self.firstSub(x) secondSub = self.secondSub(firstSub) thirdSub = self.thirdSub(secondSub) out = torch.cat([firstSub, secondSub, thirdSub], 1) return out def concatLayerSub(self, 
in_channels, out_channels, layerName): concatLayerSubOrdered = OrderedDict() conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) concatLayerSubOrdered.update({('Mconv' + layerName): conv2d}) concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU( out_channels)}) return nn.Sequential(concatLayerSubOrdered) class stage(nn.Module): def __init__(self, stageID, in_channels, out_channels_perSub, mid_channels, out_channels, appendix): super(stage, self).__init__() self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1, stageID, appendix) self.secondConcat = concatLayer(3 * out_channels_perSub, out_channels_perSub, 2, stageID, appendix) self.thirdConcat = concatLayer(3 * out_channels_perSub, out_channels_perSub, 3, stageID, appendix) self.fourthConcat = concatLayer(3 * out_channels_perSub, out_channels_perSub, 4, stageID, appendix) self.fifthConcat = concatLayer(3 * out_channels_perSub, out_channels_perSub, 5, stageID, appendix) conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels, kernel_size=1, padding=0) prelu = nn.PReLU(mid_channels) self.afterConcatsFirst = nn.Sequential(OrderedDict({( 'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, ( 'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu})) conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0 ) self.afterConcatsSecond = nn.Sequential(OrderedDict({( 'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d})) def forward(self, x): x = self.firstConcat(x) x = self.secondConcat(x) x = self.thirdConcat(x) x = self.fourthConcat(x) x = self.fifthConcat(x) x = self.afterConcatsFirst(x) out = self.afterConcatsSecond(x) return out class L1PartNew(nn.Module): def __init__(self, in_channels, stage_out_channels): super(L1PartNew, self).__init__() self.firstStage = stage(0, in_channels, 96, 256, stage_out_channels, 'L1') self.secondStage = stage(1, in_channels + stage_out_channels, 128, 512, stage_out_channels, 'L1') def forward(self, input_0, input_1): primals_3 = (self.firstStage.firstConcat.firstSub. Mconv1_stage0_L1_0.weight) primals_4 = (self.firstStage.firstConcat.firstSub. Mconv1_stage0_L1_0.bias) primals_5 = (self.firstStage.firstConcat.firstSub. Mprelu1_stage0_L1_0.weight) primals_6 = (self.firstStage.firstConcat.secondSub. Mconv1_stage0_L1_1.weight) primals_7 = (self.firstStage.firstConcat.secondSub. Mconv1_stage0_L1_1.bias) primals_8 = (self.firstStage.firstConcat.secondSub. Mprelu1_stage0_L1_1.weight) primals_9 = (self.firstStage.firstConcat.thirdSub. Mconv1_stage0_L1_2.weight) primals_10 = (self.firstStage.firstConcat.thirdSub. Mconv1_stage0_L1_2.bias) primals_11 = (self.firstStage.firstConcat.thirdSub. Mprelu1_stage0_L1_2.weight) primals_12 = (self.firstStage.secondConcat.firstSub. Mconv2_stage0_L1_0.weight) primals_13 = (self.firstStage.secondConcat.firstSub. Mconv2_stage0_L1_0.bias) primals_14 = (self.firstStage.secondConcat.firstSub. Mprelu2_stage0_L1_0.weight) primals_15 = (self.firstStage.secondConcat.secondSub. Mconv2_stage0_L1_1.weight) primals_16 = (self.firstStage.secondConcat.secondSub. Mconv2_stage0_L1_1.bias) primals_17 = (self.firstStage.secondConcat.secondSub. Mprelu2_stage0_L1_1.weight) primals_18 = (self.firstStage.secondConcat.thirdSub. Mconv2_stage0_L1_2.weight) primals_19 = (self.firstStage.secondConcat.thirdSub. Mconv2_stage0_L1_2.bias) primals_20 = (self.firstStage.secondConcat.thirdSub. Mprelu2_stage0_L1_2.weight) primals_21 = (self.firstStage.thirdConcat.firstSub. Mconv3_stage0_L1_0.weight) primals_22 = (self.firstStage.thirdConcat.firstSub. 
Mconv3_stage0_L1_0.bias) primals_23 = (self.firstStage.thirdConcat.firstSub. Mprelu3_stage0_L1_0.weight) primals_24 = (self.firstStage.thirdConcat.secondSub. Mconv3_stage0_L1_1.weight) primals_25 = (self.firstStage.thirdConcat.secondSub. Mconv3_stage0_L1_1.bias) primals_26 = (self.firstStage.thirdConcat.secondSub. Mprelu3_stage0_L1_1.weight) primals_27 = (self.firstStage.thirdConcat.thirdSub. Mconv3_stage0_L1_2.weight) primals_28 = (self.firstStage.thirdConcat.thirdSub. Mconv3_stage0_L1_2.bias) primals_29 = (self.firstStage.thirdConcat.thirdSub. Mprelu3_stage0_L1_2.weight) primals_30 = (self.firstStage.fourthConcat.firstSub. Mconv4_stage0_L1_0.weight) primals_31 = (self.firstStage.fourthConcat.firstSub. Mconv4_stage0_L1_0.bias) primals_32 = (self.firstStage.fourthConcat.firstSub. Mprelu4_stage0_L1_0.weight) primals_33 = (self.firstStage.fourthConcat.secondSub. Mconv4_stage0_L1_1.weight) primals_34 = (self.firstStage.fourthConcat.secondSub. Mconv4_stage0_L1_1.bias) primals_35 = (self.firstStage.fourthConcat.secondSub. Mprelu4_stage0_L1_1.weight) primals_36 = (self.firstStage.fourthConcat.thirdSub. Mconv4_stage0_L1_2.weight) primals_37 = (self.firstStage.fourthConcat.thirdSub. Mconv4_stage0_L1_2.bias) primals_38 = (self.firstStage.fourthConcat.thirdSub. Mprelu4_stage0_L1_2.weight) primals_39 = (self.firstStage.fifthConcat.firstSub. Mconv5_stage0_L1_0.weight) primals_40 = (self.firstStage.fifthConcat.firstSub. Mconv5_stage0_L1_0.bias) primals_41 = (self.firstStage.fifthConcat.firstSub. Mprelu5_stage0_L1_0.weight) primals_42 = (self.firstStage.fifthConcat.secondSub. Mconv5_stage0_L1_1.weight) primals_43 = (self.firstStage.fifthConcat.secondSub. Mconv5_stage0_L1_1.bias) primals_44 = (self.firstStage.fifthConcat.secondSub. Mprelu5_stage0_L1_1.weight) primals_45 = (self.firstStage.fifthConcat.thirdSub. Mconv5_stage0_L1_2.weight) primals_46 = (self.firstStage.fifthConcat.thirdSub. Mconv5_stage0_L1_2.bias) primals_47 = (self.firstStage.fifthConcat.thirdSub. Mprelu5_stage0_L1_2.weight) primals_48 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L1.weight primals_49 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L1.bias primals_50 = self.firstStage.afterConcatsFirst.Mprelu6_stage0_L1.weight primals_51 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L1.weight primals_52 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L1.bias primals_53 = (self.secondStage.firstConcat.firstSub. Mconv1_stage1_L1_0.weight) primals_54 = (self.secondStage.firstConcat.firstSub. Mconv1_stage1_L1_0.bias) primals_55 = (self.secondStage.firstConcat.firstSub. Mprelu1_stage1_L1_0.weight) primals_56 = (self.secondStage.firstConcat.secondSub. Mconv1_stage1_L1_1.weight) primals_57 = (self.secondStage.firstConcat.secondSub. Mconv1_stage1_L1_1.bias) primals_58 = (self.secondStage.firstConcat.secondSub. Mprelu1_stage1_L1_1.weight) primals_59 = (self.secondStage.firstConcat.thirdSub. Mconv1_stage1_L1_2.weight) primals_60 = (self.secondStage.firstConcat.thirdSub. Mconv1_stage1_L1_2.bias) primals_61 = (self.secondStage.firstConcat.thirdSub. Mprelu1_stage1_L1_2.weight) primals_62 = (self.secondStage.secondConcat.firstSub. Mconv2_stage1_L1_0.weight) primals_63 = (self.secondStage.secondConcat.firstSub. Mconv2_stage1_L1_0.bias) primals_64 = (self.secondStage.secondConcat.firstSub. Mprelu2_stage1_L1_0.weight) primals_65 = (self.secondStage.secondConcat.secondSub. Mconv2_stage1_L1_1.weight) primals_66 = (self.secondStage.secondConcat.secondSub. Mconv2_stage1_L1_1.bias) primals_67 = (self.secondStage.secondConcat.secondSub. 
Mprelu2_stage1_L1_1.weight) primals_68 = (self.secondStage.secondConcat.thirdSub. Mconv2_stage1_L1_2.weight) primals_69 = (self.secondStage.secondConcat.thirdSub. Mconv2_stage1_L1_2.bias) primals_70 = (self.secondStage.secondConcat.thirdSub. Mprelu2_stage1_L1_2.weight) primals_71 = (self.secondStage.thirdConcat.firstSub. Mconv3_stage1_L1_0.weight) primals_72 = (self.secondStage.thirdConcat.firstSub. Mconv3_stage1_L1_0.bias) primals_73 = (self.secondStage.thirdConcat.firstSub. Mprelu3_stage1_L1_0.weight) primals_74 = (self.secondStage.thirdConcat.secondSub. Mconv3_stage1_L1_1.weight) primals_75 = (self.secondStage.thirdConcat.secondSub. Mconv3_stage1_L1_1.bias) primals_76 = (self.secondStage.thirdConcat.secondSub. Mprelu3_stage1_L1_1.weight) primals_77 = (self.secondStage.thirdConcat.thirdSub. Mconv3_stage1_L1_2.weight) primals_78 = (self.secondStage.thirdConcat.thirdSub. Mconv3_stage1_L1_2.bias) primals_79 = (self.secondStage.thirdConcat.thirdSub. Mprelu3_stage1_L1_2.weight) primals_80 = (self.secondStage.fourthConcat.firstSub. Mconv4_stage1_L1_0.weight) primals_81 = (self.secondStage.fourthConcat.firstSub. Mconv4_stage1_L1_0.bias) primals_82 = (self.secondStage.fourthConcat.firstSub. Mprelu4_stage1_L1_0.weight) primals_83 = (self.secondStage.fourthConcat.secondSub. Mconv4_stage1_L1_1.weight) primals_84 = (self.secondStage.fourthConcat.secondSub. Mconv4_stage1_L1_1.bias) primals_85 = (self.secondStage.fourthConcat.secondSub. Mprelu4_stage1_L1_1.weight) primals_86 = (self.secondStage.fourthConcat.thirdSub. Mconv4_stage1_L1_2.weight) primals_87 = (self.secondStage.fourthConcat.thirdSub. Mconv4_stage1_L1_2.bias) primals_88 = (self.secondStage.fourthConcat.thirdSub. Mprelu4_stage1_L1_2.weight) primals_89 = (self.secondStage.fifthConcat.firstSub. Mconv5_stage1_L1_0.weight) primals_90 = (self.secondStage.fifthConcat.firstSub. Mconv5_stage1_L1_0.bias) primals_91 = (self.secondStage.fifthConcat.firstSub. Mprelu5_stage1_L1_0.weight) primals_92 = (self.secondStage.fifthConcat.secondSub. Mconv5_stage1_L1_1.weight) primals_93 = (self.secondStage.fifthConcat.secondSub. Mconv5_stage1_L1_1.bias) primals_94 = (self.secondStage.fifthConcat.secondSub. Mprelu5_stage1_L1_1.weight) primals_95 = (self.secondStage.fifthConcat.thirdSub. Mconv5_stage1_L1_2.weight) primals_96 = (self.secondStage.fifthConcat.thirdSub. Mconv5_stage1_L1_2.bias) primals_97 = (self.secondStage.fifthConcat.thirdSub. 
Mprelu5_stage1_L1_2.weight) primals_98 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L1.weight primals_99 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L1.bias primals_100 = (self.secondStage.afterConcatsFirst.Mprelu6_stage1_L1 .weight) primals_101 = (self.secondStage.afterConcatsSecond.Mconv7_stage1_L1 .weight) primals_102 = self.secondStage.afterConcatsSecond.Mconv7_stage1_L1.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102]) return output[0]
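(Editorial note on this record, not part of the source repository.) The compiled module follows the usual Inductor convention: call() receives every learned tensor as an explicit argument, so L1PartNew.forward does no arithmetic of its own; it gathers the one hundred weight, bias, and PReLU tensors (primals_3 through primals_102) from the two stage submodules and passes them, together with the two inputs as primals_1 and primals_2, into the fused kernel graph. A minimal smoke test, assuming a CUDA device (call() pins execution to device 0) and using hypothetical names for the two input tensors:

# Editorial sketch: exercises L1PartNew with the exact shapes that the
# assert_size_stride guards at the top of call() demand.
model = L1PartNew(in_channels=4, stage_out_channels=4).cuda()
input_a = torch.rand(4, 1, 4, 4, device='cuda')  # becomes primals_1
input_b = torch.rand(4, 3, 4, 4, device='cuda')  # becomes primals_2
out = model(input_a, input_b)
print(out.shape)  # torch.Size([4, 4, 4, 4]), the shape of buf100

The in_channels=4 / stage_out_channels=4 values are forced by those shape guards; any other configuration would fail the asserts in call().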
byeongjokim/LateTemporalModeling3DCNN_for_sign
L1Part
false
1,682
[ "MIT" ]
0
e3a802fcf91dc3930aea782464ee34d9b747d3ab
https://github.com/byeongjokim/LateTemporalModeling3DCNN_for_sign/tree/e3a802fcf91dc3930aea782464ee34d9b747d3ab
判断状态
import math
import torch
import numpy as np
import torch.nn as nn


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 
        0.044715 * torch.pow(x, 3))))


class 全连接层(nn.Module):

    def __init__(self, 输入_接口, 输出_接口):
        super().__init__()
        np.random.seed(1)
        self.weight = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 /
            np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), (输入_接口, 输出_接口))))
        self.bias = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 /
            np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), 输出_接口)))

    def forward(self, x):
        输出 = torch.matmul(x, self.weight)
        输出 = 输出 + self.bias
        return 输出


class 判断状态(nn.Module):

    def __init__(self, 种类数, 隐藏层尺寸, 输入层尺寸=2048, 输入尺寸A=36):
        super().__init__()
        self.隐藏层尺寸 = 隐藏层尺寸
        self.输入层尺寸 = 输入层尺寸
        self.输入尺寸A = 输入尺寸A
        self.输入层 = 全连接层(输入层尺寸, 隐藏层尺寸)
        self.隐藏层 = 全连接层(隐藏层尺寸, 隐藏层尺寸)
        self.输出层 = 全连接层(隐藏层尺寸 * 输入尺寸A, 种类数)

    def forward(self, 图向量):
        图向量 = 图向量.reshape((图向量.shape[0], self.输入尺寸A, self.输入层尺寸))
        中间量 = gelu(self.输入层(图向量))
        中间量 = self.隐藏层(中间量)
        中间量 = 中间量.reshape((中间量.shape[0], self.隐藏层尺寸 * self.输入尺寸A))
        结果 = self.输出层(中间量)
        return 结果


def get_inputs():
    return [torch.rand([4, 36, 2048])]


def get_init_inputs():
    return [[], {'种类数': 4, '隐藏层尺寸': 4}]
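(Editorial note, not part of the source record.) 判断状态 (roughly, "judge state") is a three-layer MLP built from the hand-rolled 全连接层 ("fully connected layer") module, and its gelu helper is the standard tanh approximation of the Gaussian Error Linear Unit:

    gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))

The optimised code below inlines exactly these constants into a single fused kernel: 0.5, 0.044715, and sqrt(2 / pi) ≈ 0.7978845608028654 all appear verbatim in triton_poi_fused_add_mul_pow_tanh_0. Note also that np.random.seed(1) inside 全连接层.__init__ resets NumPy's global stream for every layer, so all 全连接层 instances draw from the same sequence; presumably this is deliberate for reproducibility.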
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import numpy as np
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = tmp2 * tmp2
    tmp6 = tmp5 * tmp2
    tmp7 = 0.044715
    tmp8 = tmp6 * tmp7
    tmp9 = tmp2 + tmp8
    tmp10 = 0.7978845608028654
    tmp11 = tmp9 * tmp10
    tmp12 = libdevice.tanh(tmp11)
    tmp13 = 1.0
    tmp14 = tmp12 + tmp13
    tmp15 = tmp4 * tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 36, 2048), (73728, 2048, 1))
    assert_size_stride(primals_2, (2048, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (144, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((144, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (144, 2048), (2048,
            1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_pow_tanh_0[grid(576)](buf0, primals_3,
            buf1, 576, XBLOCK=256, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((144, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (144, 4), (4, 1), 0),
            primals_4, out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 36, 4), (144, 4, 1), 0)
        del buf2
        triton_poi_fused_add_1[grid(576)](buf3, primals_5, 576, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (4, 144),
            (144, 1), 0), primals_6, alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, primals_3, buf0, reinterpret_tensor(buf3, (4, 144), (1, 
        144), 0), reinterpret_tensor(primals_6, (4, 144), (1, 4), 0
        ), reinterpret_tensor(buf1, (4, 144), (1, 4), 0), reinterpret_tensor(
        primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (2048,
        144), (1, 2048), 0)


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 
        0.044715 * torch.pow(x, 3))))


class 全连接层(nn.Module):

    def __init__(self, 输入_接口, 输出_接口):
        super().__init__()
        np.random.seed(1)
        self.weight = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 /
            np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), (输入_接口, 输出_接口))))
        self.bias = nn.Parameter(torch.FloatTensor(np.random.uniform(-1 /
            np.sqrt(输入_接口), 1 / np.sqrt(输入_接口), 输出_接口)))

    def forward(self, x):
        输出 = torch.matmul(x, self.weight)
        输出 = 输出 + self.bias
        return 输出


class 判断状态New(nn.Module):

    def __init__(self, 种类数, 隐藏层尺寸, 输入层尺寸=2048, 输入尺寸A=36):
        super().__init__()
        self.隐藏层尺寸 = 隐藏层尺寸
        self.输入层尺寸 = 输入层尺寸
        self.输入尺寸A = 输入尺寸A
        self.输入层 = 全连接层(输入层尺寸, 隐藏层尺寸)
        self.隐藏层 = 全连接层(隐藏层尺寸, 隐藏层尺寸)
        self.输出层 = 全连接层(隐藏层尺寸 * 输入尺寸A, 种类数)

    def forward(self, input_0):
        primals_2 = self.输入层.weight
        primals_3 = self.输入层.bias
        primals_4 = self.隐藏层.weight
        primals_5 = self.隐藏层.bias
        primals_6 = self.输出层.weight
        primals_7 = self.输出层.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
chenjun-110/WZCQ
判断状态
false
1,683
[ "Apache-2.0" ]
0
e2de7743ad671e8632cfa084638555d7f1deb42f
https://github.com/chenjun-110/WZCQ/tree/e2de7743ad671e8632cfa084638555d7f1deb42f
SoftDiceLoss
import torch
from torch.utils.data.sampler import *
import torch.nn as nn
import torch.nn.functional as F


class SoftDiceLoss(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, targets):
        num = targets.size(0)
        probs = F.sigmoid(logits)
        m1 = probs.view(num, -1)
        m2 = targets.view(num, -1)
        intersection = m1 * m2
        score = 2.0 * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
        score = 1 - score.sum() / num
        return score


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
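(Editorial note, not part of the source record.) Per sample, SoftDiceLoss computes a smoothed Dice coefficient over the flattened predicted probabilities m1 = sigmoid(logits) and targets m2,

    score_i = 2 * (sum(m1 * m2) + 1) / (sum(m1) + sum(m2) + 1)

and returns 1 - mean(score); the +1 terms are additive smoothing that keeps the ratio defined when both masks are empty. In the compiled version below, the first kernel produces the three per-sample sums in a single pass and the second folds in the constants, with the batch mean appearing as the literal tmp13 = 0.25 (1/4 for the fixed batch of 4). One caveat worth flagging: F.sigmoid is deprecated in modern PyTorch in favor of torch.sigmoid, but the record is left as-is because the optimised code was generated from this exact source.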
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.utils.data.sampler import *
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp10 = tl.where(xmask, tmp8, 0)
    tmp11 = tl.sum(tmp10, 1)[:, None]
    tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp14 = tl.where(xmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp7, xmask)
    tl.store(out_ptr1 + x0, tmp11, xmask)
    tl.store(out_ptr2 + x0, tmp15, xmask)


@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp5 = tl.load(in_ptr1 + r0, None)
    tmp6 = tl.load(in_ptr2 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp7 + tmp1
    tmp9 = tmp4 / tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.sum(tmp10, 1)[:, None]
    tmp13 = 0.25
    tmp14 = tmp12 * tmp13
    tmp15 = tmp1 - tmp14
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
            buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
            buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf4,


class SoftDiceLossNew(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(SoftDiceLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
chicm/carvana
SoftDiceLoss
false
1,684
[ "Apache-2.0" ]
0
493a19fbb2fdab1cc1b4b95d97742684e4144229
https://github.com/chicm/carvana/tree/493a19fbb2fdab1cc1b4b95d97742684e4144229
ScaledDotProductAttention
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.checkpoint


class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v):
        attn = torch.bmm(q, k.transpose(1, 2))
        attn = attn / self.temperature
        log_attn = F.log_softmax(attn, 2)
        attn = self.softmax(attn)
        attn = self.dropout(attn)
        output = torch.bmm(attn, v)
        return output, attn, log_attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
        ]


def get_init_inputs():
    return [[], {'temperature': 4}]
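(Editorial note, not part of the source record.) The module computes the textbook attention = softmax(q @ k.transpose(1, 2) / temperature) @ v and additionally returns the attention weights and the log-softmax of the scaled scores. Two things to notice in the compiled version below: the temperature is frozen into the kernel (with temperature=4 from get_init_inputs(), the max-shifted scores are multiplied by the literal tmp15 = 0.25 rather than divided at runtime, so the artifact is only valid for that temperature), and the nn.Dropout layer leaves no trace in call(), which is consistent with the graph having been traced in eval mode.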
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.checkpoint

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
        ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
        constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
            16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        extern_kernels.bmm(buf2, arg2_1, out=buf3)
        del arg2_1
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf4
    return buf3, buf2, buf5


class ScaledDotProductAttentionNew(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1], output[2]
chenxiaoyu523/FEAT3D
ScaledDotProductAttention
false
1,685
[ "MIT" ]
0
ba45ba7c26628a7cc0070b010f4f33893cdac926
https://github.com/chenxiaoyu523/FEAT3D/tree/ba45ba7c26628a7cc0070b010f4f33893cdac926
BCELoss2d
import torch
from torch.utils.data.sampler import *
import torch.nn as nn
import torch.nn.functional as F


class BCELoss2d(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2d, self).__init__()
        self.bce_loss = nn.BCELoss(weight, size_average)

    def forward(self, logits, targets):
        probs = F.sigmoid(logits)
        probs_flat = probs.view(-1)
        targets_flat = targets.view(-1)
        return self.bce_loss(probs_flat, targets_flat)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
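(Editorial note, not part of the source record.) Applying sigmoid and then nn.BCELoss is mathematically the same as nn.BCEWithLogitsLoss on the raw logits, which is the numerically preferred form; the (weight, size_average) constructor arguments also use the old reduction API that newer PyTorch replaces with reduction='mean'. The fused kernel below reproduces BCELoss's internal safeguard: it evaluates (t - 1) * log(1 - p) - t * log(p) with both log terms clamped from below at -100.0 (tmp7), then divides the element sum by the literal tmp17 = 256.0 (the 4*4*4*4 elements of the fixed input shape) to take the mean.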
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.utils.data.sampler import *
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = -tmp4
    tmp6 = libdevice.log1p(tmp5)
    tmp7 = -100.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp2 * tmp8
    tmp10 = tl_math.log(tmp4)
    tmp11 = triton_helpers.maximum(tmp10, tmp7)
    tmp12 = tmp0 * tmp11
    tmp13 = tmp9 - tmp12
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = 256.0
    tmp18 = tmp16 / tmp17
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class BCELoss2dNew(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2dNew, self).__init__()
        self.bce_loss = nn.BCELoss(weight, size_average)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
chicm/carvana
BCELoss2d
false
1,686
[ "Apache-2.0" ]
0
493a19fbb2fdab1cc1b4b95d97742684e4144229
https://github.com/chicm/carvana/tree/493a19fbb2fdab1cc1b4b95d97742684e4144229
L2Part
import torch
import torch.nn as nn
from itertools import chain as chain
import torch.utils.data
from collections import OrderedDict
import torch.hub
import torch.nn.parallel
import torch.optim


class concatLayer(nn.Module):

    def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
        super(concatLayer, self).__init__()
        self.firstSub = self.concatLayerSub(in_channels,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
        self.secondSub = self.concatLayerSub(out_channels_perSub,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
        self.thirdSub = self.concatLayerSub(out_channels_perSub,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')

    def forward(self, x):
        firstSub = self.firstSub(x)
        secondSub = self.secondSub(firstSub)
        thirdSub = self.thirdSub(secondSub)
        out = torch.cat([firstSub, secondSub, thirdSub], 1)
        return out

    def concatLayerSub(self, in_channels, out_channels, layerName):
        concatLayerSubOrdered = OrderedDict()
        conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
        concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
            out_channels)})
        return nn.Sequential(concatLayerSubOrdered)


class stage(nn.Module):

    def __init__(self, stageID, in_channels, out_channels_perSub,
            mid_channels, out_channels, appendix):
        super(stage, self).__init__()
        self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
            stageID, appendix)
        self.secondConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 2, stageID, appendix)
        self.thirdConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 3, stageID, appendix)
        self.fourthConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 4, stageID, appendix)
        self.fifthConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 5, stageID, appendix)
        conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
            kernel_size=1, padding=0)
        prelu = nn.PReLU(mid_channels)
        self.afterConcatsFirst = nn.Sequential(OrderedDict({(
            'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
            'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
        conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0
            )
        self.afterConcatsSecond = nn.Sequential(OrderedDict({(
            'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))

    def forward(self, x):
        x = self.firstConcat(x)
        x = self.secondConcat(x)
        x = self.thirdConcat(x)
        x = self.fourthConcat(x)
        x = self.fifthConcat(x)
        x = self.afterConcatsFirst(x)
        out = self.afterConcatsSecond(x)
        return out


class L2Part(nn.Module):

    def __init__(self, in_channels, stage_out_channels):
        super(L2Part, self).__init__()
        self.firstStage = stage(0, in_channels, 96, in_channels * 2,
            stage_out_channels, 'L2')
        self.secondStage = stage(1, in_channels + stage_out_channels,
            in_channels, in_channels * 4, stage_out_channels, 'L2')
        self.thirdStage = stage(2, in_channels + stage_out_channels,
            in_channels, in_channels * 4, stage_out_channels, 'L2')
        self.fourthStage = stage(3, in_channels + stage_out_channels,
            in_channels, in_channels * 4, stage_out_channels, 'L2')

    def forward(self, features):
        x = self.firstStage(features)
        x = torch.cat([features, x], 1)
        x = self.secondStage(x)
        x = torch.cat([features, x], 1)
        x = self.thirdStage(x)
        x = torch.cat([features, x], 1)
        out = self.fourthStage(x)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'stage_out_channels': 4}]
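(Editorial sketch, not part of the source record; variable names below are mine.) Each concatLayer chains three 3x3 conv + PReLU blocks and concatenates their outputs, hence the 3 * out_channels_perSub fan-in everywhere downstream, and L2Part re-concatenates the original features with each stage's output, so stages 2-4 see in_channels + stage_out_channels input channels. Spatial size is preserved throughout (every 3x3 conv uses padding=1, every 1x1 conv uses padding=0), which this minimal smoke test checks using the shapes from get_inputs()/get_init_inputs():

# Output has stage_out_channels channels and the same spatial size as the input.
model = L2Part(in_channels=4, stage_out_channels=4)
x = torch.rand(4, 4, 4, 4)
out = model(x)
assert out.shape == (4, 4, 4, 4)

The Inductor output below compiles this same graph with all weights passed explicitly.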
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import chain as chain import torch.utils.data from collections import OrderedDict import torch.hub import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 288 x0 = xindex % 16 x2 = xindex // 4608 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 96, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1536 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 192, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-96 + x1) + 1536 * x2), tmp9, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 288, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-192 + x1) + 1536 * x2), tmp11, other=0.0) tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = tl.load(in_ptr3 + (-192 + x1), tmp11, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp17 * tmp14 tmp19 = tl.where(tmp16, tmp14, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp10, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x3, tmp23, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 12 x0 = xindex % 16 x2 = xindex // 192 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp11 & xmask, other=0.0) tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = tl.load(in_ptr3 + (-8 + x1), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp17 * tmp14 tmp19 = tl.where(tmp16, tmp14, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp10, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x3, tmp23, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_8(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, 
        primals_3, primals_4, primals_5, primals_6, primals_7, primals_8,
        primals_9, primals_10, primals_11, primals_12, primals_13, primals_14,
        primals_15, primals_16, primals_17, primals_18, primals_19, primals_20,
        primals_21, primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31, primals_32,
        primals_33, primals_34, primals_35, primals_36, primals_37, primals_38,
        primals_39, primals_40, primals_41, primals_42, primals_43, primals_44,
        primals_45, primals_46, primals_47, primals_48, primals_49, primals_50,
        primals_51, primals_52, primals_53, primals_54, primals_55, primals_56,
        primals_57, primals_58, primals_59, primals_60, primals_61, primals_62,
        primals_63, primals_64, primals_65, primals_66, primals_67, primals_68,
        primals_69, primals_70, primals_71, primals_72, primals_73, primals_74,
        primals_75, primals_76, primals_77, primals_78, primals_79, primals_80,
        primals_81, primals_82, primals_83, primals_84, primals_85, primals_86,
        primals_87, primals_88, primals_89, primals_90, primals_91, primals_92,
        primals_93, primals_94, primals_95, primals_96, primals_97, primals_98,
        primals_99, primals_100, primals_101, primals_102, primals_103,
        primals_104, primals_105, primals_106, primals_107, primals_108,
        primals_109, primals_110, primals_111, primals_112, primals_113,
        primals_114, primals_115, primals_116, primals_117, primals_118,
        primals_119, primals_120, primals_121, primals_122, primals_123,
        primals_124, primals_125, primals_126, primals_127, primals_128,
        primals_129, primals_130, primals_131, primals_132, primals_133,
        primals_134, primals_135, primals_136, primals_137, primals_138,
        primals_139, primals_140, primals_141, primals_142, primals_143,
        primals_144, primals_145, primals_146, primals_147, primals_148,
        primals_149, primals_150, primals_151, primals_152, primals_153,
        primals_154, primals_155, primals_156, primals_157, primals_158,
        primals_159, primals_160, primals_161, primals_162, primals_163,
        primals_164, primals_165, primals_166, primals_167, primals_168,
        primals_169, primals_170, primals_171, primals_172, primals_173,
        primals_174, primals_175, primals_176, primals_177, primals_178,
        primals_179, primals_180, primals_181, primals_182, primals_183,
        primals_184, primals_185, primals_186, primals_187, primals_188,
        primals_189, primals_190, primals_191, primals_192, primals_193,
        primals_194, primals_195, primals_196, primals_197, primals_198,
        primals_199, primals_200, primals_201) = args
    args.clear()
    assert_size_stride(primals_1, (96, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (96,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (96,), (1,))
    assert_size_stride(primals_5, (96, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_6, (96,), (1,))
    assert_size_stride(primals_7, (96,), (1,))
    assert_size_stride(primals_8, (96, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_9, (96,), (1,))
    assert_size_stride(primals_10, (96,), (1,))
    assert_size_stride(primals_11, (96, 288, 3, 3), (2592, 9, 3, 1))
    assert_size_stride(primals_12, (96,), (1,))
    assert_size_stride(primals_13, (96,), (1,))
    assert_size_stride(primals_14, (96, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_15, (96,), (1,))
    assert_size_stride(primals_16, (96,), (1,))
    assert_size_stride(primals_17, (96, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_18, (96,), (1,))
    assert_size_stride(primals_19, (96,), (1,))
    assert_size_stride(primals_20, (96, 288, 3, 3), (2592, 9, 3, 1))
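    # NOTE (editorial, inferred from the shape asserts and the parameter
    # bindings in L2PartNew.forward below): primals_3 is the network input;
    # primals_1-51 belong to the first (96-channel) stage, primals_52-101 to
    # the second, primals_102-151 to the third and primals_152-201 to the
    # fourth stage. Within each concat sub-block the order is conv weight,
    # conv bias, PReLU weight.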
assert_size_stride(primals_21, (96,), (1,)) assert_size_stride(primals_22, (96,), (1,)) assert_size_stride(primals_23, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_24, (96,), (1,)) assert_size_stride(primals_25, (96,), (1,)) assert_size_stride(primals_26, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_27, (96,), (1,)) assert_size_stride(primals_28, (96,), (1,)) assert_size_stride(primals_29, (96, 288, 3, 3), (2592, 9, 3, 1)) assert_size_stride(primals_30, (96,), (1,)) assert_size_stride(primals_31, (96,), (1,)) assert_size_stride(primals_32, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_33, (96,), (1,)) assert_size_stride(primals_34, (96,), (1,)) assert_size_stride(primals_35, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_36, (96,), (1,)) assert_size_stride(primals_37, (96,), (1,)) assert_size_stride(primals_38, (96, 288, 3, 3), (2592, 9, 3, 1)) assert_size_stride(primals_39, (96,), (1,)) assert_size_stride(primals_40, (96,), (1,)) assert_size_stride(primals_41, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_42, (96,), (1,)) assert_size_stride(primals_43, (96,), (1,)) assert_size_stride(primals_44, (96, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_45, (96,), (1,)) assert_size_stride(primals_46, (96,), (1,)) assert_size_stride(primals_47, (8, 288, 1, 1), (288, 1, 1, 1)) assert_size_stride(primals_48, (8,), (1,)) assert_size_stride(primals_49, (8,), (1,)) assert_size_stride(primals_50, (4, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_51, (4,), (1,)) assert_size_stride(primals_52, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_53, (4,), (1,)) assert_size_stride(primals_54, (4,), (1,)) assert_size_stride(primals_55, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_56, (4,), (1,)) assert_size_stride(primals_57, (4,), (1,)) assert_size_stride(primals_58, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_59, (4,), (1,)) assert_size_stride(primals_60, (4,), (1,)) assert_size_stride(primals_61, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_62, (4,), (1,)) assert_size_stride(primals_63, (4,), (1,)) assert_size_stride(primals_64, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_65, (4,), (1,)) assert_size_stride(primals_66, (4,), (1,)) assert_size_stride(primals_67, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_68, (4,), (1,)) assert_size_stride(primals_69, (4,), (1,)) assert_size_stride(primals_70, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_71, (4,), (1,)) assert_size_stride(primals_72, (4,), (1,)) assert_size_stride(primals_73, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_74, (4,), (1,)) assert_size_stride(primals_75, (4,), (1,)) assert_size_stride(primals_76, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_77, (4,), (1,)) assert_size_stride(primals_78, (4,), (1,)) assert_size_stride(primals_79, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_80, (4,), (1,)) assert_size_stride(primals_81, (4,), (1,)) assert_size_stride(primals_82, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_83, (4,), (1,)) assert_size_stride(primals_84, (4,), (1,)) assert_size_stride(primals_85, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_86, (4,), (1,)) assert_size_stride(primals_87, (4,), (1,)) assert_size_stride(primals_88, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_89, (4,), (1,)) assert_size_stride(primals_90, (4,), (1,)) assert_size_stride(primals_91, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_92, (4,), 
(1,)) assert_size_stride(primals_93, (4,), (1,)) assert_size_stride(primals_94, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_95, (4,), (1,)) assert_size_stride(primals_96, (4,), (1,)) assert_size_stride(primals_97, (16, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_98, (16,), (1,)) assert_size_stride(primals_99, (16,), (1,)) assert_size_stride(primals_100, (4, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_101, (4,), (1,)) assert_size_stride(primals_102, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_103, (4,), (1,)) assert_size_stride(primals_104, (4,), (1,)) assert_size_stride(primals_105, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_106, (4,), (1,)) assert_size_stride(primals_107, (4,), (1,)) assert_size_stride(primals_108, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_109, (4,), (1,)) assert_size_stride(primals_110, (4,), (1,)) assert_size_stride(primals_111, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_112, (4,), (1,)) assert_size_stride(primals_113, (4,), (1,)) assert_size_stride(primals_114, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_115, (4,), (1,)) assert_size_stride(primals_116, (4,), (1,)) assert_size_stride(primals_117, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_118, (4,), (1,)) assert_size_stride(primals_119, (4,), (1,)) assert_size_stride(primals_120, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_121, (4,), (1,)) assert_size_stride(primals_122, (4,), (1,)) assert_size_stride(primals_123, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_124, (4,), (1,)) assert_size_stride(primals_125, (4,), (1,)) assert_size_stride(primals_126, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_127, (4,), (1,)) assert_size_stride(primals_128, (4,), (1,)) assert_size_stride(primals_129, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_130, (4,), (1,)) assert_size_stride(primals_131, (4,), (1,)) assert_size_stride(primals_132, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_133, (4,), (1,)) assert_size_stride(primals_134, (4,), (1,)) assert_size_stride(primals_135, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_136, (4,), (1,)) assert_size_stride(primals_137, (4,), (1,)) assert_size_stride(primals_138, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_139, (4,), (1,)) assert_size_stride(primals_140, (4,), (1,)) assert_size_stride(primals_141, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_142, (4,), (1,)) assert_size_stride(primals_143, (4,), (1,)) assert_size_stride(primals_144, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_145, (4,), (1,)) assert_size_stride(primals_146, (4,), (1,)) assert_size_stride(primals_147, (16, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_148, (16,), (1,)) assert_size_stride(primals_149, (16,), (1,)) assert_size_stride(primals_150, (4, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_151, (4,), (1,)) assert_size_stride(primals_152, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_153, (4,), (1,)) assert_size_stride(primals_154, (4,), (1,)) assert_size_stride(primals_155, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_156, (4,), (1,)) assert_size_stride(primals_157, (4,), (1,)) assert_size_stride(primals_158, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_159, (4,), (1,)) assert_size_stride(primals_160, (4,), (1,)) assert_size_stride(primals_161, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_162, (4,), (1,)) assert_size_stride(primals_163, (4,), (1,)) 
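    # NOTE (editorial, read off the asserts above and below): the later-stage
    # parameters repeat one pattern -- 3x3 refinement convs are (4, 4, 3, 3),
    # each stage's entry conv is (4, 8, 3, 3), the concat-merge convs are
    # (4, 12, 3, 3), and every stage head is a (16, 12, 1, 1) conv followed by
    # a (4, 16, 1, 1) output conv.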
assert_size_stride(primals_164, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_165, (4,), (1,)) assert_size_stride(primals_166, (4,), (1,)) assert_size_stride(primals_167, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_168, (4,), (1,)) assert_size_stride(primals_169, (4,), (1,)) assert_size_stride(primals_170, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_171, (4,), (1,)) assert_size_stride(primals_172, (4,), (1,)) assert_size_stride(primals_173, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_174, (4,), (1,)) assert_size_stride(primals_175, (4,), (1,)) assert_size_stride(primals_176, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_177, (4,), (1,)) assert_size_stride(primals_178, (4,), (1,)) assert_size_stride(primals_179, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_180, (4,), (1,)) assert_size_stride(primals_181, (4,), (1,)) assert_size_stride(primals_182, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_183, (4,), (1,)) assert_size_stride(primals_184, (4,), (1,)) assert_size_stride(primals_185, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_186, (4,), (1,)) assert_size_stride(primals_187, (4,), (1,)) assert_size_stride(primals_188, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_189, (4,), (1,)) assert_size_stride(primals_190, (4,), (1,)) assert_size_stride(primals_191, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_192, (4,), (1,)) assert_size_stride(primals_193, (4,), (1,)) assert_size_stride(primals_194, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_195, (4,), (1,)) assert_size_stride(primals_196, (4,), (1,)) assert_size_stride(primals_197, (16, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_198, (16,), (1,)) assert_size_stride(primals_199, (16,), (1,)) assert_size_stride(primals_200, (4, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_201, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 4, 4), (1536, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf1, primals_2, primals_4, buf2, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 96, 4, 4), (1536, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf4, primals_6, primals_7, buf5, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 96, 4, 4), (1536, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_1[grid(6144)](buf7, primals_9, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(18432)](buf2, buf5, buf7, primals_10, buf8, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 96, 4, 4), (1536, 16, 4, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf10, primals_12, primals_13, buf11, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_12 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 96, 4, 4), (1536, 16, 4, 1)) buf13 = buf12 del buf12 buf14 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf13, primals_15, primals_16, buf14, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 96, 4, 4), (1536, 16, 4, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_1[grid(6144)](buf16, primals_18, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_18 buf17 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_2[grid(18432)](buf11, buf14, buf16, primals_19, buf17, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 96, 4, 4), (1536, 16, 4, 1)) buf19 = buf18 del buf18 buf20 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf19, primals_21, primals_22, buf20, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf21 = extern_kernels.convolution(buf20, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 96, 4, 4), (1536, 16, 4, 1)) buf22 = buf21 del buf21 buf23 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf22, primals_24, primals_25, buf23, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_24 buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 96, 4, 4), (1536, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_1[grid(6144)](buf25, primals_27, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_27 buf26 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_2[grid(18432)](buf20, buf23, buf25, primals_28, buf26, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf27 = extern_kernels.convolution(buf26, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 96, 4, 4), (1536, 16, 4, 1)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf28, primals_30, primals_31, buf29, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_30 buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 96, 4, 4), (1536, 16, 4, 1)) buf31 = buf30 del buf30 buf32 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf31, primals_33, primals_34, buf32, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_33 buf33 = extern_kernels.convolution(buf32, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 96, 4, 4), (1536, 16, 4, 1)) buf34 = buf33 del buf33 triton_poi_fused_convolution_1[grid(6144)](buf34, primals_36, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_36 buf35 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. float32) triton_poi_fused_cat_2[grid(18432)](buf29, buf32, buf34, primals_37, buf35, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 96, 4, 4), (1536, 16, 4, 1)) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf37, primals_39, primals_40, buf38, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_39 buf39 = extern_kernels.convolution(buf38, primals_41, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 96, 4, 4), (1536, 16, 4, 1)) buf40 = buf39 del buf39 buf41 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf40, primals_42, primals_43, buf41, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_42 buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 96, 4, 4), (1536, 16, 4, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_1[grid(6144)](buf43, primals_45, 6144, XBLOCK=128, num_warps=4, num_stages=1) del primals_45 buf44 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(18432)](buf38, buf41, buf43, primals_46, buf44, 18432, XBLOCK=128, num_warps=4, num_stages=1) buf45 = extern_kernels.convolution(buf44, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 8, 4, 4), (128, 16, 4, 1)) buf46 = buf45 del buf45 buf47 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_3[grid(512)](buf46, primals_48, primals_49, buf47, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_48 buf48 = extern_kernels.convolution(buf47, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 4, 4, 4), (64, 16, 4, 1)) buf49 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_4[grid(512)](primals_3, buf48, primals_51, buf49, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_51 buf50 = extern_kernels.convolution(buf49, primals_52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 4, 4, 4), (64, 16, 4, 1)) buf51 = buf50 del buf50 buf52 = buf48 del buf48 triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf51, primals_53, primals_54, buf52, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_53 buf53 = extern_kernels.convolution(buf52, primals_55, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 4, 4, 4), (64, 16, 4, 1)) buf54 = buf53 del buf53 buf55 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf54, primals_56, primals_57, buf55, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_56 buf56 = extern_kernels.convolution(buf55, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 4, 4, 4), (64, 16, 4, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_6[grid(256)](buf57, primals_59, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_59 buf58 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_7[grid(768)](buf52, buf55, buf57, primals_60, buf58, 768, XBLOCK=128, num_warps=4, num_stages=1) buf59 = extern_kernels.convolution(buf58, primals_61, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 4, 4, 4), (64, 16, 4, 1)) buf60 = buf59 del buf59 buf61 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf60, primals_62, primals_63, buf61, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_62 buf62 = extern_kernels.convolution(buf61, primals_64, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 4, 4, 4), (64, 16, 4, 1)) buf63 = buf62 del buf62 buf64 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf63, primals_65, primals_66, buf64, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_65 buf65 = extern_kernels.convolution(buf64, primals_67, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 4, 4, 4), (64, 16, 4, 1)) buf66 = buf65 del buf65 triton_poi_fused_convolution_6[grid(256)](buf66, primals_68, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_68 buf67 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf61, buf64, buf66, primals_69, buf67, 768, XBLOCK=128, num_warps=4, num_stages=1) buf68 = extern_kernels.convolution(buf67, primals_70, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 4, 4, 4), (64, 16, 4, 1)) buf69 = buf68 del buf68 buf70 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf69, primals_71, primals_72, buf70, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_71 buf71 = extern_kernels.convolution(buf70, primals_73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf71, (4, 4, 4, 4), (64, 16, 4, 1)) buf72 = buf71 del buf71 buf73 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf72, primals_74, primals_75, buf73, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_74 buf74 = extern_kernels.convolution(buf73, primals_76, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf74, (4, 4, 4, 4), (64, 16, 4, 1)) buf75 = buf74 del buf74 triton_poi_fused_convolution_6[grid(256)](buf75, primals_77, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_77 buf76 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_7[grid(768)](buf70, buf73, buf75, primals_78, buf76, 768, XBLOCK=128, num_warps=4, num_stages=1) buf77 = extern_kernels.convolution(buf76, primals_79, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf77, (4, 4, 4, 4), (64, 16, 4, 1)) buf78 = buf77 del buf77 buf79 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf78, primals_80, primals_81, buf79, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_80 buf80 = extern_kernels.convolution(buf79, primals_82, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf80, (4, 4, 4, 4), (64, 16, 4, 1)) buf81 = buf80 del buf80 buf82 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf81, primals_83, primals_84, buf82, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_83 buf83 = extern_kernels.convolution(buf82, primals_85, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 4, 4, 4), (64, 16, 4, 1)) buf84 = buf83 del buf83 triton_poi_fused_convolution_6[grid(256)](buf84, primals_86, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_86 buf85 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf79, buf82, buf84, primals_87, buf85, 768, XBLOCK=128, num_warps=4, num_stages=1) buf86 = extern_kernels.convolution(buf85, primals_88, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 4, 4, 4), (64, 16, 4, 1)) buf87 = buf86 del buf86 buf88 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf87, primals_89, primals_90, buf88, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_89 buf89 = extern_kernels.convolution(buf88, primals_91, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf89, (4, 4, 4, 4), (64, 16, 4, 1)) buf90 = buf89 del buf89 buf91 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf90, primals_92, primals_93, buf91, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_92 buf92 = extern_kernels.convolution(buf91, primals_94, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf92, (4, 4, 4, 4), (64, 16, 4, 1)) buf93 = buf92 del buf92 triton_poi_fused_convolution_6[grid(256)](buf93, primals_95, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_95 buf94 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf88, buf91, buf93, primals_96, buf94, 768, XBLOCK=128, num_warps=4, num_stages=1) buf95 = extern_kernels.convolution(buf94, primals_97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf95, (4, 16, 4, 4), (256, 16, 4, 1)) buf96 = buf95 del buf95 buf97 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_8[grid(1024)](buf96, primals_98, primals_99, buf97, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_98 buf98 = extern_kernels.convolution(buf97, primals_100, stride=(1, 1 ), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 4, 4, 4), (64, 16, 4, 1)) buf99 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_4[grid(512)](primals_3, buf98, primals_101, buf99, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_101 buf100 = extern_kernels.convolution(buf99, primals_102, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf100, (4, 4, 4, 4), (64, 16, 4, 1)) buf101 = buf100 del buf100 buf102 = buf98 del buf98 triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf101, primals_103, primals_104, buf102, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_103 buf103 = extern_kernels.convolution(buf102, primals_105, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf103, (4, 4, 4, 4), (64, 16, 4, 1)) buf104 = buf103 del buf103 buf105 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf104, primals_106, primals_107, buf105, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_106 buf106 = extern_kernels.convolution(buf105, primals_108, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf106, (4, 4, 4, 4), (64, 16, 4, 1)) buf107 = buf106 del buf106 triton_poi_fused_convolution_6[grid(256)](buf107, primals_109, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_109 buf108 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf102, buf105, buf107, primals_110, buf108, 768, XBLOCK=128, num_warps=4, num_stages=1) buf109 = extern_kernels.convolution(buf108, primals_111, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 4, 4, 4), (64, 16, 4, 1)) buf110 = buf109 del buf109 buf111 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf110, primals_112, primals_113, buf111, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_112 buf112 = extern_kernels.convolution(buf111, primals_114, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf112, (4, 4, 4, 4), (64, 16, 4, 1)) buf113 = buf112 del buf112 buf114 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf113, primals_115, primals_116, buf114, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_115 buf115 = extern_kernels.convolution(buf114, primals_117, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf115, (4, 4, 4, 4), (64, 16, 4, 1)) buf116 = buf115 del buf115 triton_poi_fused_convolution_6[grid(256)](buf116, primals_118, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_118 buf117 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_7[grid(768)](buf111, buf114, buf116, primals_119, buf117, 768, XBLOCK=128, num_warps=4, num_stages=1) buf118 = extern_kernels.convolution(buf117, primals_120, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf118, (4, 4, 4, 4), (64, 16, 4, 1)) buf119 = buf118 del buf118 buf120 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf119, primals_121, primals_122, buf120, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_121 buf121 = extern_kernels.convolution(buf120, primals_123, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf121, (4, 4, 4, 4), (64, 16, 4, 1)) buf122 = buf121 del buf121 buf123 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf122, primals_124, primals_125, buf123, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_124 buf124 = extern_kernels.convolution(buf123, primals_126, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf124, (4, 4, 4, 4), (64, 16, 4, 1)) buf125 = buf124 del buf124 triton_poi_fused_convolution_6[grid(256)](buf125, primals_127, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_127 buf126 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf120, buf123, buf125, primals_128, buf126, 768, XBLOCK=128, num_warps=4, num_stages=1) buf127 = extern_kernels.convolution(buf126, primals_129, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf127, (4, 4, 4, 4), (64, 16, 4, 1)) buf128 = buf127 del buf127 buf129 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf128, primals_130, primals_131, buf129, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_130 buf130 = extern_kernels.convolution(buf129, primals_132, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf130, (4, 4, 4, 4), (64, 16, 4, 1)) buf131 = buf130 del buf130 buf132 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf131, primals_133, primals_134, buf132, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_133 buf133 = extern_kernels.convolution(buf132, primals_135, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf133, (4, 4, 4, 4), (64, 16, 4, 1)) buf134 = buf133 del buf133 triton_poi_fused_convolution_6[grid(256)](buf134, primals_136, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_136 buf135 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_7[grid(768)](buf129, buf132, buf134, primals_137, buf135, 768, XBLOCK=128, num_warps=4, num_stages=1) buf136 = extern_kernels.convolution(buf135, primals_138, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf136, (4, 4, 4, 4), (64, 16, 4, 1)) buf137 = buf136 del buf136 buf138 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf137, primals_139, primals_140, buf138, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_139 buf139 = extern_kernels.convolution(buf138, primals_141, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf139, (4, 4, 4, 4), (64, 16, 4, 1)) buf140 = buf139 del buf139 buf141 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf140, primals_142, primals_143, buf141, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_142 buf142 = extern_kernels.convolution(buf141, primals_144, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 4, 4, 4), (64, 16, 4, 1)) buf143 = buf142 del buf142 triton_poi_fused_convolution_6[grid(256)](buf143, primals_145, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_145 buf144 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf138, buf141, buf143, primals_146, buf144, 768, XBLOCK=128, num_warps=4, num_stages=1) buf145 = extern_kernels.convolution(buf144, primals_147, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf145, (4, 16, 4, 4), (256, 16, 4, 1)) buf146 = buf145 del buf145 buf147 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_8[grid(1024)](buf146, primals_148, primals_149, buf147, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_148 buf148 = extern_kernels.convolution(buf147, primals_150, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf148, (4, 4, 4, 4), (64, 16, 4, 1)) buf149 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_4[grid(512)](primals_3, buf148, primals_151, buf149, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_151 buf150 = extern_kernels.convolution(buf149, primals_152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf150, (4, 4, 4, 4), (64, 16, 4, 1)) buf151 = buf150 del buf150 buf152 = buf148 del buf148 triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf151, primals_153, primals_154, buf152, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_153 buf153 = extern_kernels.convolution(buf152, primals_155, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 4, 4, 4), (64, 16, 4, 1)) buf154 = buf153 del buf153 buf155 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf154, primals_156, primals_157, buf155, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_156 buf156 = extern_kernels.convolution(buf155, primals_158, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf156, (4, 4, 4, 4), (64, 16, 4, 1)) buf157 = buf156 del buf156 triton_poi_fused_convolution_6[grid(256)](buf157, primals_159, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_159 buf158 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf152, buf155, buf157, primals_160, buf158, 768, XBLOCK=128, num_warps=4, num_stages=1) buf159 = extern_kernels.convolution(buf158, primals_161, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf159, (4, 4, 4, 4), (64, 16, 4, 1)) buf160 = buf159 del buf159 buf161 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf160, primals_162, primals_163, buf161, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_162 buf162 = extern_kernels.convolution(buf161, primals_164, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf162, (4, 4, 4, 4), (64, 16, 4, 1)) buf163 = buf162 del buf162 buf164 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf163, primals_165, primals_166, buf164, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_165 buf165 = extern_kernels.convolution(buf164, primals_167, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf165, (4, 4, 4, 4), (64, 16, 4, 1)) buf166 = buf165 del buf165 triton_poi_fused_convolution_6[grid(256)](buf166, primals_168, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_168 buf167 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_7[grid(768)](buf161, buf164, buf166, primals_169, buf167, 768, XBLOCK=128, num_warps=4, num_stages=1) buf168 = extern_kernels.convolution(buf167, primals_170, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf168, (4, 4, 4, 4), (64, 16, 4, 1)) buf169 = buf168 del buf168 buf170 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf169, primals_171, primals_172, buf170, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_171 buf171 = extern_kernels.convolution(buf170, primals_173, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf171, (4, 4, 4, 4), (64, 16, 4, 1)) buf172 = buf171 del buf171 buf173 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf172, primals_174, primals_175, buf173, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_174 buf174 = extern_kernels.convolution(buf173, primals_176, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf174, (4, 4, 4, 4), (64, 16, 4, 1)) buf175 = buf174 del buf174 triton_poi_fused_convolution_6[grid(256)](buf175, primals_177, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_177 buf176 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf170, buf173, buf175, primals_178, buf176, 768, XBLOCK=128, num_warps=4, num_stages=1) buf177 = extern_kernels.convolution(buf176, primals_179, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf177, (4, 4, 4, 4), (64, 16, 4, 1)) buf178 = buf177 del buf177 buf179 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf178, primals_180, primals_181, buf179, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_180 buf180 = extern_kernels.convolution(buf179, primals_182, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf180, (4, 4, 4, 4), (64, 16, 4, 1)) buf181 = buf180 del buf180 buf182 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf181, primals_183, primals_184, buf182, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_183 buf183 = extern_kernels.convolution(buf182, primals_185, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf183, (4, 4, 4, 4), (64, 16, 4, 1)) buf184 = buf183 del buf183 triton_poi_fused_convolution_6[grid(256)](buf184, primals_186, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_186 buf185 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_7[grid(768)](buf179, buf182, buf184, primals_187, buf185, 768, XBLOCK=128, num_warps=4, num_stages=1) buf186 = extern_kernels.convolution(buf185, primals_188, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf186, (4, 4, 4, 4), (64, 16, 4, 1)) buf187 = buf186 del buf186 buf188 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf187, primals_189, primals_190, buf188, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_189 buf189 = extern_kernels.convolution(buf188, primals_191, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf189, (4, 4, 4, 4), (64, 16, 4, 1)) buf190 = buf189 del buf189 buf191 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf190, primals_192, primals_193, buf191, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_192 buf192 = extern_kernels.convolution(buf191, primals_194, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf192, (4, 4, 4, 4), (64, 16, 4, 1)) buf193 = buf192 del buf192 triton_poi_fused_convolution_6[grid(256)](buf193, primals_195, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_195 buf194 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_7[grid(768)](buf188, buf191, buf193, primals_196, buf194, 768, XBLOCK=128, num_warps=4, num_stages=1) buf195 = extern_kernels.convolution(buf194, primals_197, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf195, (4, 16, 4, 4), (256, 16, 4, 1)) buf196 = buf195 del buf195 buf197 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
        float32)
    triton_poi_fused__prelu_kernel_convolution_8[grid(1024)](buf196,
        primals_198, primals_199, buf197, 1024, XBLOCK=128, num_warps=4,
        num_stages=1)
    del primals_198
    buf198 = extern_kernels.convolution(buf197, primals_200, stride=(1, 1),
        padding=(0, 0), dilation=(1, 1), transposed=False,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf198, (4, 4, 4, 4), (64, 16, 4, 1))
    buf199 = buf198
    del buf198
    triton_poi_fused_convolution_6[grid(256)](buf199, primals_201, 256,
        XBLOCK=256, num_warps=4, num_stages=1)
    del primals_201
    return (buf199, primals_1, primals_3, primals_4, primals_5, primals_7,
        primals_8, primals_10, primals_11, primals_13, primals_14,
        primals_16, primals_17, primals_19, primals_20, primals_22,
        primals_23, primals_25, primals_26, primals_28, primals_29,
        primals_31, primals_32, primals_34, primals_35, primals_37,
        primals_38, primals_40, primals_41, primals_43, primals_44,
        primals_46, primals_47, primals_49, primals_50, primals_52,
        primals_54, primals_55, primals_57, primals_58, primals_60,
        primals_61, primals_63, primals_64, primals_66, primals_67,
        primals_69, primals_70, primals_72, primals_73, primals_75,
        primals_76, primals_78, primals_79, primals_81, primals_82,
        primals_84, primals_85, primals_87, primals_88, primals_90,
        primals_91, primals_93, primals_94, primals_96, primals_97,
        primals_99, primals_100, primals_102, primals_104, primals_105,
        primals_107, primals_108, primals_110, primals_111, primals_113,
        primals_114, primals_116, primals_117, primals_119, primals_120,
        primals_122, primals_123, primals_125, primals_126, primals_128,
        primals_129, primals_131, primals_132, primals_134, primals_135,
        primals_137, primals_138, primals_140, primals_141, primals_143,
        primals_144, primals_146, primals_147, primals_149, primals_150,
        primals_152, primals_154, primals_155, primals_157, primals_158,
        primals_160, primals_161, primals_163, primals_164, primals_166,
        primals_167, primals_169, primals_170, primals_172, primals_173,
        primals_175, primals_176, primals_178, primals_179, primals_181,
        primals_182, primals_184, primals_185, primals_187, primals_188,
        primals_190, primals_191, primals_193, primals_194, primals_196,
        primals_197, primals_199, primals_200, buf1, buf2, buf4, buf5,
        buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19,
        buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32,
        buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46,
        buf47, buf49, buf51, buf52, buf54, buf55, buf57, buf58, buf60,
        buf61, buf63, buf64, buf66, buf67, buf69, buf70, buf72, buf73,
        buf75, buf76, buf78, buf79, buf81, buf82, buf84, buf85, buf87,
        buf88, buf90, buf91, buf93, buf94, buf96, buf97, buf99, buf101,
        buf102, buf104, buf105, buf107, buf108, buf110, buf111, buf113,
        buf114, buf116, buf117, buf119, buf120, buf122, buf123, buf125,
        buf126, buf128, buf129, buf131, buf132, buf134, buf135, buf137,
        buf138, buf140, buf141, buf143, buf144, buf146, buf147, buf149,
        buf151, buf152, buf154, buf155, buf157, buf158, buf160, buf161,
        buf163, buf164, buf166, buf167, buf169, buf170, buf172, buf173,
        buf175, buf176, buf178, buf179, buf181, buf182, buf184, buf185,
        buf187, buf188, buf190, buf191, buf193, buf194, buf196, buf197)


class concatLayer(nn.Module):

    def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
        super(concatLayer, self).__init__()
        self.firstSub = self.concatLayerSub(in_channels,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
        self.secondSub = self.concatLayerSub(out_channels_perSub,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
        self.thirdSub = self.concatLayerSub(out_channels_perSub,
            out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')

    def forward(self, x):
        firstSub = self.firstSub(x)
        secondSub = self.secondSub(firstSub)
        thirdSub = self.thirdSub(secondSub)
        out = torch.cat([firstSub, secondSub, thirdSub], 1)
        return out

    def concatLayerSub(self, in_channels, out_channels, layerName):
        concatLayerSubOrdered = OrderedDict()
        conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
        concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
            out_channels)})
        return nn.Sequential(concatLayerSubOrdered)


class stage(nn.Module):

    def __init__(self, stageID, in_channels, out_channels_perSub,
        mid_channels, out_channels, appendix):
        super(stage, self).__init__()
        self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
            stageID, appendix)
        self.secondConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 2, stageID, appendix)
        self.thirdConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 3, stageID, appendix)
        self.fourthConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 4, stageID, appendix)
        self.fifthConcat = concatLayer(3 * out_channels_perSub,
            out_channels_perSub, 5, stageID, appendix)
        conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
            kernel_size=1, padding=0)
        prelu = nn.PReLU(mid_channels)
        self.afterConcatsFirst = nn.Sequential(OrderedDict({(
            'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
            'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
        conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1,
            padding=0)
        self.afterConcatsSecond = nn.Sequential(OrderedDict({(
            'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))

    def forward(self, x):
        x = self.firstConcat(x)
        x = self.secondConcat(x)
        x = self.thirdConcat(x)
        x = self.fourthConcat(x)
        x = self.fifthConcat(x)
        x = self.afterConcatsFirst(x)
        out = self.afterConcatsSecond(x)
        return out


class L2PartNew(nn.Module):

    def __init__(self, in_channels, stage_out_channels):
        super(L2PartNew, self).__init__()
        self.firstStage = stage(0, in_channels, 96, in_channels * 2,
            stage_out_channels, 'L2')
        self.secondStage = stage(1, in_channels + stage_out_channels,
            in_channels, in_channels * 4, stage_out_channels, 'L2')
        self.thirdStage = stage(2, in_channels + stage_out_channels,
            in_channels, in_channels * 4, stage_out_channels, 'L2')
        self.fourthStage = stage(3, in_channels + stage_out_channels,
            in_channels, in_channels * 4, stage_out_channels, 'L2')

    def forward(self, input_0):
        primals_1 = (self.firstStage.firstConcat.firstSub.
            Mconv1_stage0_L2_0.weight)
        primals_2 = (self.firstStage.firstConcat.firstSub.
            Mconv1_stage0_L2_0.bias)
        primals_4 = (self.firstStage.firstConcat.firstSub.
            Mprelu1_stage0_L2_0.weight)
        primals_5 = (self.firstStage.firstConcat.secondSub.
            Mconv1_stage0_L2_1.weight)
        primals_6 = (self.firstStage.firstConcat.secondSub.
            Mconv1_stage0_L2_1.bias)
        primals_7 = (self.firstStage.firstConcat.secondSub.
            Mprelu1_stage0_L2_1.weight)
        primals_8 = (self.firstStage.firstConcat.thirdSub.
            Mconv1_stage0_L2_2.weight)
        primals_9 = (self.firstStage.firstConcat.thirdSub.
            Mconv1_stage0_L2_2.bias)
        primals_10 = (self.firstStage.firstConcat.thirdSub.
            Mprelu1_stage0_L2_2.weight)
        primals_11 = (self.firstStage.secondConcat.firstSub.
            Mconv2_stage0_L2_0.weight)
        primals_12 = (self.firstStage.secondConcat.firstSub.
            Mconv2_stage0_L2_0.bias)
        primals_13 = (self.firstStage.secondConcat.firstSub.
            Mprelu2_stage0_L2_0.weight)
        primals_14 = self.firstStage.secondConcat.secondSub.Mconv2_stage0_L2_1.weight
        primals_15 = self.firstStage.secondConcat.secondSub.Mconv2_stage0_L2_1.bias
        primals_16 = self.firstStage.secondConcat.secondSub.Mprelu2_stage0_L2_1.weight
        primals_17 = self.firstStage.secondConcat.thirdSub.Mconv2_stage0_L2_2.weight
        primals_18 = self.firstStage.secondConcat.thirdSub.Mconv2_stage0_L2_2.bias
        primals_19 = self.firstStage.secondConcat.thirdSub.Mprelu2_stage0_L2_2.weight
        primals_20 = self.firstStage.thirdConcat.firstSub.Mconv3_stage0_L2_0.weight
        primals_21 = self.firstStage.thirdConcat.firstSub.Mconv3_stage0_L2_0.bias
        primals_22 = self.firstStage.thirdConcat.firstSub.Mprelu3_stage0_L2_0.weight
        primals_23 = self.firstStage.thirdConcat.secondSub.Mconv3_stage0_L2_1.weight
        primals_24 = self.firstStage.thirdConcat.secondSub.Mconv3_stage0_L2_1.bias
        primals_25 = self.firstStage.thirdConcat.secondSub.Mprelu3_stage0_L2_1.weight
        primals_26 = self.firstStage.thirdConcat.thirdSub.Mconv3_stage0_L2_2.weight
        primals_27 = self.firstStage.thirdConcat.thirdSub.Mconv3_stage0_L2_2.bias
        primals_28 = self.firstStage.thirdConcat.thirdSub.Mprelu3_stage0_L2_2.weight
        primals_29 = self.firstStage.fourthConcat.firstSub.Mconv4_stage0_L2_0.weight
        primals_30 = self.firstStage.fourthConcat.firstSub.Mconv4_stage0_L2_0.bias
        primals_31 = self.firstStage.fourthConcat.firstSub.Mprelu4_stage0_L2_0.weight
        primals_32 = self.firstStage.fourthConcat.secondSub.Mconv4_stage0_L2_1.weight
        primals_33 = self.firstStage.fourthConcat.secondSub.Mconv4_stage0_L2_1.bias
        primals_34 = self.firstStage.fourthConcat.secondSub.Mprelu4_stage0_L2_1.weight
        primals_35 = self.firstStage.fourthConcat.thirdSub.Mconv4_stage0_L2_2.weight
        primals_36 = self.firstStage.fourthConcat.thirdSub.Mconv4_stage0_L2_2.bias
        primals_37 = self.firstStage.fourthConcat.thirdSub.Mprelu4_stage0_L2_2.weight
        primals_38 = self.firstStage.fifthConcat.firstSub.Mconv5_stage0_L2_0.weight
        primals_39 = self.firstStage.fifthConcat.firstSub.Mconv5_stage0_L2_0.bias
        primals_40 = self.firstStage.fifthConcat.firstSub.Mprelu5_stage0_L2_0.weight
        primals_41 = self.firstStage.fifthConcat.secondSub.Mconv5_stage0_L2_1.weight
        primals_42 = self.firstStage.fifthConcat.secondSub.Mconv5_stage0_L2_1.bias
        primals_43 = self.firstStage.fifthConcat.secondSub.Mprelu5_stage0_L2_1.weight
        primals_44 = self.firstStage.fifthConcat.thirdSub.Mconv5_stage0_L2_2.weight
        primals_45 = self.firstStage.fifthConcat.thirdSub.Mconv5_stage0_L2_2.bias
        primals_46 = self.firstStage.fifthConcat.thirdSub.Mprelu5_stage0_L2_2.weight
        primals_47 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L2.weight
        primals_48 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L2.bias
        primals_49 = self.firstStage.afterConcatsFirst.Mprelu6_stage0_L2.weight
        primals_50 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L2.weight
        primals_51 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L2.bias
        primals_52 = self.secondStage.firstConcat.firstSub.Mconv1_stage1_L2_0.weight
        primals_53 = self.secondStage.firstConcat.firstSub.Mconv1_stage1_L2_0.bias
        primals_54 = self.secondStage.firstConcat.firstSub.Mprelu1_stage1_L2_0.weight
        primals_55 = self.secondStage.firstConcat.secondSub.Mconv1_stage1_L2_1.weight
        primals_56 = self.secondStage.firstConcat.secondSub.Mconv1_stage1_L2_1.bias
        primals_57 = self.secondStage.firstConcat.secondSub.Mprelu1_stage1_L2_1.weight
        primals_58 = self.secondStage.firstConcat.thirdSub.Mconv1_stage1_L2_2.weight
        primals_59 = self.secondStage.firstConcat.thirdSub.Mconv1_stage1_L2_2.bias
        primals_60 = self.secondStage.firstConcat.thirdSub.Mprelu1_stage1_L2_2.weight
        primals_61 = self.secondStage.secondConcat.firstSub.Mconv2_stage1_L2_0.weight
        primals_62 = self.secondStage.secondConcat.firstSub.Mconv2_stage1_L2_0.bias
        primals_63 = self.secondStage.secondConcat.firstSub.Mprelu2_stage1_L2_0.weight
        primals_64 = self.secondStage.secondConcat.secondSub.Mconv2_stage1_L2_1.weight
        primals_65 = self.secondStage.secondConcat.secondSub.Mconv2_stage1_L2_1.bias
        primals_66 = self.secondStage.secondConcat.secondSub.Mprelu2_stage1_L2_1.weight
        primals_67 = self.secondStage.secondConcat.thirdSub.Mconv2_stage1_L2_2.weight
        primals_68 = self.secondStage.secondConcat.thirdSub.Mconv2_stage1_L2_2.bias
        primals_69 = self.secondStage.secondConcat.thirdSub.Mprelu2_stage1_L2_2.weight
        primals_70 = self.secondStage.thirdConcat.firstSub.Mconv3_stage1_L2_0.weight
        primals_71 = self.secondStage.thirdConcat.firstSub.Mconv3_stage1_L2_0.bias
        primals_72 = self.secondStage.thirdConcat.firstSub.Mprelu3_stage1_L2_0.weight
        primals_73 = self.secondStage.thirdConcat.secondSub.Mconv3_stage1_L2_1.weight
        primals_74 = self.secondStage.thirdConcat.secondSub.Mconv3_stage1_L2_1.bias
        primals_75 = self.secondStage.thirdConcat.secondSub.Mprelu3_stage1_L2_1.weight
        primals_76 = self.secondStage.thirdConcat.thirdSub.Mconv3_stage1_L2_2.weight
        primals_77 = self.secondStage.thirdConcat.thirdSub.Mconv3_stage1_L2_2.bias
        primals_78 = self.secondStage.thirdConcat.thirdSub.Mprelu3_stage1_L2_2.weight
        primals_79 = self.secondStage.fourthConcat.firstSub.Mconv4_stage1_L2_0.weight
        primals_80 = self.secondStage.fourthConcat.firstSub.Mconv4_stage1_L2_0.bias
        primals_81 = self.secondStage.fourthConcat.firstSub.Mprelu4_stage1_L2_0.weight
        primals_82 = self.secondStage.fourthConcat.secondSub.Mconv4_stage1_L2_1.weight
        primals_83 = self.secondStage.fourthConcat.secondSub.Mconv4_stage1_L2_1.bias
        primals_84 = self.secondStage.fourthConcat.secondSub.Mprelu4_stage1_L2_1.weight
        primals_85 = self.secondStage.fourthConcat.thirdSub.Mconv4_stage1_L2_2.weight
        primals_86 = self.secondStage.fourthConcat.thirdSub.Mconv4_stage1_L2_2.bias
        primals_87 = self.secondStage.fourthConcat.thirdSub.Mprelu4_stage1_L2_2.weight
        primals_88 = self.secondStage.fifthConcat.firstSub.Mconv5_stage1_L2_0.weight
        primals_89 = self.secondStage.fifthConcat.firstSub.Mconv5_stage1_L2_0.bias
        primals_90 = self.secondStage.fifthConcat.firstSub.Mprelu5_stage1_L2_0.weight
        primals_91 = self.secondStage.fifthConcat.secondSub.Mconv5_stage1_L2_1.weight
        primals_92 = self.secondStage.fifthConcat.secondSub.Mconv5_stage1_L2_1.bias
        primals_93 = self.secondStage.fifthConcat.secondSub.Mprelu5_stage1_L2_1.weight
        primals_94 = self.secondStage.fifthConcat.thirdSub.Mconv5_stage1_L2_2.weight
        primals_95 = self.secondStage.fifthConcat.thirdSub.Mconv5_stage1_L2_2.bias
        primals_96 = self.secondStage.fifthConcat.thirdSub.Mprelu5_stage1_L2_2.weight
        primals_97 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L2.weight
        primals_98 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L2.bias
        primals_99 = self.secondStage.afterConcatsFirst.Mprelu6_stage1_L2.weight
        primals_100 = self.secondStage.afterConcatsSecond.Mconv7_stage1_L2.weight
        primals_101 = self.secondStage.afterConcatsSecond.Mconv7_stage1_L2.bias
        primals_102 = self.thirdStage.firstConcat.firstSub.Mconv1_stage2_L2_0.weight
        primals_103 = self.thirdStage.firstConcat.firstSub.Mconv1_stage2_L2_0.bias
        primals_104 = self.thirdStage.firstConcat.firstSub.Mprelu1_stage2_L2_0.weight
        primals_105 = self.thirdStage.firstConcat.secondSub.Mconv1_stage2_L2_1.weight
        primals_106 = self.thirdStage.firstConcat.secondSub.Mconv1_stage2_L2_1.bias
        primals_107 = self.thirdStage.firstConcat.secondSub.Mprelu1_stage2_L2_1.weight
        primals_108 = self.thirdStage.firstConcat.thirdSub.Mconv1_stage2_L2_2.weight
        primals_109 = self.thirdStage.firstConcat.thirdSub.Mconv1_stage2_L2_2.bias
        primals_110 = self.thirdStage.firstConcat.thirdSub.Mprelu1_stage2_L2_2.weight
        primals_111 = self.thirdStage.secondConcat.firstSub.Mconv2_stage2_L2_0.weight
        primals_112 = self.thirdStage.secondConcat.firstSub.Mconv2_stage2_L2_0.bias
        primals_113 = self.thirdStage.secondConcat.firstSub.Mprelu2_stage2_L2_0.weight
        primals_114 = self.thirdStage.secondConcat.secondSub.Mconv2_stage2_L2_1.weight
        primals_115 = self.thirdStage.secondConcat.secondSub.Mconv2_stage2_L2_1.bias
        primals_116 = self.thirdStage.secondConcat.secondSub.Mprelu2_stage2_L2_1.weight
        primals_117 = self.thirdStage.secondConcat.thirdSub.Mconv2_stage2_L2_2.weight
        primals_118 = self.thirdStage.secondConcat.thirdSub.Mconv2_stage2_L2_2.bias
        primals_119 = self.thirdStage.secondConcat.thirdSub.Mprelu2_stage2_L2_2.weight
        primals_120 = self.thirdStage.thirdConcat.firstSub.Mconv3_stage2_L2_0.weight
        primals_121 = self.thirdStage.thirdConcat.firstSub.Mconv3_stage2_L2_0.bias
        primals_122 = self.thirdStage.thirdConcat.firstSub.Mprelu3_stage2_L2_0.weight
        primals_123 = self.thirdStage.thirdConcat.secondSub.Mconv3_stage2_L2_1.weight
        primals_124 = self.thirdStage.thirdConcat.secondSub.Mconv3_stage2_L2_1.bias
        primals_125 = self.thirdStage.thirdConcat.secondSub.Mprelu3_stage2_L2_1.weight
        primals_126 = self.thirdStage.thirdConcat.thirdSub.Mconv3_stage2_L2_2.weight
        primals_127 = self.thirdStage.thirdConcat.thirdSub.Mconv3_stage2_L2_2.bias
        primals_128 = self.thirdStage.thirdConcat.thirdSub.Mprelu3_stage2_L2_2.weight
        primals_129 = self.thirdStage.fourthConcat.firstSub.Mconv4_stage2_L2_0.weight
        primals_130 = self.thirdStage.fourthConcat.firstSub.Mconv4_stage2_L2_0.bias
        primals_131 = self.thirdStage.fourthConcat.firstSub.Mprelu4_stage2_L2_0.weight
        primals_132 = self.thirdStage.fourthConcat.secondSub.Mconv4_stage2_L2_1.weight
        primals_133 = self.thirdStage.fourthConcat.secondSub.Mconv4_stage2_L2_1.bias
        primals_134 = self.thirdStage.fourthConcat.secondSub.Mprelu4_stage2_L2_1.weight
        primals_135 = self.thirdStage.fourthConcat.thirdSub.Mconv4_stage2_L2_2.weight
        primals_136 = self.thirdStage.fourthConcat.thirdSub.Mconv4_stage2_L2_2.bias
        primals_137 = self.thirdStage.fourthConcat.thirdSub.Mprelu4_stage2_L2_2.weight
        primals_138 = self.thirdStage.fifthConcat.firstSub.Mconv5_stage2_L2_0.weight
        primals_139 = self.thirdStage.fifthConcat.firstSub.Mconv5_stage2_L2_0.bias
        primals_140 = self.thirdStage.fifthConcat.firstSub.Mprelu5_stage2_L2_0.weight
        primals_141 = self.thirdStage.fifthConcat.secondSub.Mconv5_stage2_L2_1.weight
        primals_142 = self.thirdStage.fifthConcat.secondSub.Mconv5_stage2_L2_1.bias
        primals_143 = self.thirdStage.fifthConcat.secondSub.Mprelu5_stage2_L2_1.weight
        primals_144 = self.thirdStage.fifthConcat.thirdSub.Mconv5_stage2_L2_2.weight
        primals_145 = self.thirdStage.fifthConcat.thirdSub.Mconv5_stage2_L2_2.bias
        primals_146 = self.thirdStage.fifthConcat.thirdSub.Mprelu5_stage2_L2_2.weight
        primals_147 = self.thirdStage.afterConcatsFirst.Mconv6_stage2_L2.weight
        primals_148 = self.thirdStage.afterConcatsFirst.Mconv6_stage2_L2.bias
        primals_149 = self.thirdStage.afterConcatsFirst.Mprelu6_stage2_L2.weight
        primals_150 = self.thirdStage.afterConcatsSecond.Mconv7_stage2_L2.weight
        primals_151 = self.thirdStage.afterConcatsSecond.Mconv7_stage2_L2.bias
        primals_152 = self.fourthStage.firstConcat.firstSub.Mconv1_stage3_L2_0.weight
        primals_153 = self.fourthStage.firstConcat.firstSub.Mconv1_stage3_L2_0.bias
        primals_154 = self.fourthStage.firstConcat.firstSub.Mprelu1_stage3_L2_0.weight
        primals_155 = self.fourthStage.firstConcat.secondSub.Mconv1_stage3_L2_1.weight
        primals_156 = self.fourthStage.firstConcat.secondSub.Mconv1_stage3_L2_1.bias
        primals_157 = self.fourthStage.firstConcat.secondSub.Mprelu1_stage3_L2_1.weight
        primals_158 = self.fourthStage.firstConcat.thirdSub.Mconv1_stage3_L2_2.weight
        primals_159 = self.fourthStage.firstConcat.thirdSub.Mconv1_stage3_L2_2.bias
        primals_160 = self.fourthStage.firstConcat.thirdSub.Mprelu1_stage3_L2_2.weight
        primals_161 = self.fourthStage.secondConcat.firstSub.Mconv2_stage3_L2_0.weight
        primals_162 = self.fourthStage.secondConcat.firstSub.Mconv2_stage3_L2_0.bias
        primals_163 = self.fourthStage.secondConcat.firstSub.Mprelu2_stage3_L2_0.weight
        primals_164 = self.fourthStage.secondConcat.secondSub.Mconv2_stage3_L2_1.weight
        primals_165 = self.fourthStage.secondConcat.secondSub.Mconv2_stage3_L2_1.bias
        primals_166 = self.fourthStage.secondConcat.secondSub.Mprelu2_stage3_L2_1.weight
        primals_167 = self.fourthStage.secondConcat.thirdSub.Mconv2_stage3_L2_2.weight
        primals_168 = self.fourthStage.secondConcat.thirdSub.Mconv2_stage3_L2_2.bias
        primals_169 = self.fourthStage.secondConcat.thirdSub.Mprelu2_stage3_L2_2.weight
        primals_170 = self.fourthStage.thirdConcat.firstSub.Mconv3_stage3_L2_0.weight
        primals_171 = self.fourthStage.thirdConcat.firstSub.Mconv3_stage3_L2_0.bias
        primals_172 = self.fourthStage.thirdConcat.firstSub.Mprelu3_stage3_L2_0.weight
        primals_173 = self.fourthStage.thirdConcat.secondSub.Mconv3_stage3_L2_1.weight
        primals_174 = self.fourthStage.thirdConcat.secondSub.Mconv3_stage3_L2_1.bias
        primals_175 = self.fourthStage.thirdConcat.secondSub.Mprelu3_stage3_L2_1.weight
        primals_176 = self.fourthStage.thirdConcat.thirdSub.Mconv3_stage3_L2_2.weight
        primals_177 = self.fourthStage.thirdConcat.thirdSub.Mconv3_stage3_L2_2.bias
        primals_178 = self.fourthStage.thirdConcat.thirdSub.Mprelu3_stage3_L2_2.weight
        primals_179 = self.fourthStage.fourthConcat.firstSub.Mconv4_stage3_L2_0.weight
        primals_180 = self.fourthStage.fourthConcat.firstSub.Mconv4_stage3_L2_0.bias
        primals_181 = self.fourthStage.fourthConcat.firstSub.Mprelu4_stage3_L2_0.weight
        primals_182 = self.fourthStage.fourthConcat.secondSub.Mconv4_stage3_L2_1.weight
        primals_183 = self.fourthStage.fourthConcat.secondSub.Mconv4_stage3_L2_1.bias
        primals_184 = self.fourthStage.fourthConcat.secondSub.Mprelu4_stage3_L2_1.weight
        primals_185 = self.fourthStage.fourthConcat.thirdSub.Mconv4_stage3_L2_2.weight
        primals_186 = self.fourthStage.fourthConcat.thirdSub.Mconv4_stage3_L2_2.bias
        primals_187 = self.fourthStage.fourthConcat.thirdSub.Mprelu4_stage3_L2_2.weight
        primals_188 = self.fourthStage.fifthConcat.firstSub.Mconv5_stage3_L2_0.weight
        primals_189 = self.fourthStage.fifthConcat.firstSub.Mconv5_stage3_L2_0.bias
        primals_190 = self.fourthStage.fifthConcat.firstSub.Mprelu5_stage3_L2_0.weight
        primals_191 = self.fourthStage.fifthConcat.secondSub.Mconv5_stage3_L2_1.weight
        primals_192 = self.fourthStage.fifthConcat.secondSub.Mconv5_stage3_L2_1.bias
        primals_193 = self.fourthStage.fifthConcat.secondSub.Mprelu5_stage3_L2_1.weight
        primals_194 = self.fourthStage.fifthConcat.thirdSub.Mconv5_stage3_L2_2.weight
        primals_195 = self.fourthStage.fifthConcat.thirdSub.Mconv5_stage3_L2_2.bias
        primals_196 = self.fourthStage.fifthConcat.thirdSub.Mprelu5_stage3_L2_2.weight
        primals_197 = self.fourthStage.afterConcatsFirst.Mconv6_stage3_L2.weight
        primals_198 = self.fourthStage.afterConcatsFirst.Mconv6_stage3_L2.bias
        primals_199 = self.fourthStage.afterConcatsFirst.Mprelu6_stage3_L2.weight
        primals_200 = self.fourthStage.afterConcatsSecond.Mconv7_stage3_L2.weight
        primals_201 = self.fourthStage.afterConcatsSecond.Mconv7_stage3_L2.bias
        primals_3 = input_0
        output = call([
            primals_1, primals_2, primals_3, primals_4, primals_5,
            primals_6, primals_7, primals_8, primals_9, primals_10,
            primals_11, primals_12, primals_13, primals_14, primals_15,
            primals_16, primals_17, primals_18, primals_19, primals_20,
            primals_21, primals_22, primals_23, primals_24, primals_25,
            primals_26, primals_27, primals_28, primals_29, primals_30,
            primals_31, primals_32, primals_33, primals_34, primals_35,
            primals_36, primals_37, primals_38, primals_39, primals_40,
            primals_41, primals_42, primals_43, primals_44, primals_45,
            primals_46, primals_47, primals_48, primals_49, primals_50,
            primals_51, primals_52, primals_53, primals_54, primals_55,
            primals_56, primals_57, primals_58, primals_59, primals_60,
            primals_61, primals_62, primals_63, primals_64, primals_65,
            primals_66, primals_67, primals_68, primals_69, primals_70,
            primals_71, primals_72, primals_73, primals_74, primals_75,
            primals_76, primals_77, primals_78, primals_79, primals_80,
            primals_81, primals_82, primals_83, primals_84, primals_85,
            primals_86, primals_87, primals_88, primals_89, primals_90,
            primals_91, primals_92, primals_93, primals_94, primals_95,
            primals_96, primals_97, primals_98, primals_99, primals_100,
            primals_101, primals_102, primals_103, primals_104, primals_105,
            primals_106, primals_107, primals_108, primals_109, primals_110,
            primals_111, primals_112, primals_113, primals_114, primals_115,
            primals_116, primals_117, primals_118, primals_119, primals_120,
            primals_121, primals_122, primals_123, primals_124, primals_125,
            primals_126, primals_127, primals_128, primals_129, primals_130,
            primals_131, primals_132, primals_133, primals_134, primals_135,
            primals_136, primals_137, primals_138, primals_139, primals_140,
            primals_141, primals_142, primals_143, primals_144, primals_145,
            primals_146, primals_147, primals_148, primals_149, primals_150,
            primals_151, primals_152, primals_153, primals_154, primals_155,
            primals_156, primals_157, primals_158, primals_159, primals_160,
            primals_161, primals_162, primals_163, primals_164, primals_165,
            primals_166, primals_167, primals_168, primals_169, primals_170,
            primals_171, primals_172, primals_173, primals_174, primals_175,
            primals_176, primals_177, primals_178, primals_179, primals_180,
            primals_181, primals_182, primals_183, primals_184, primals_185,
            primals_186, primals_187, primals_188, primals_189, primals_190,
            primals_191, primals_192, primals_193, primals_194, primals_195,
            primals_196, primals_197, primals_198, primals_199, primals_200,
            primals_201])
        return output[0]
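
# Hedged usage sketch, added for illustration; it is not emitted by the
# inductor export itself. The forward() above only rebinds the module's
# named parameters to the positional primals_* slots expected by the
# generated call() (the input tensor occupies the primals_3 slot), and
# output[0] is the module's result buffer. The sketch assumes the
# get_inputs()/get_init_inputs() helpers from the original module code
# earlier in this document, and it requires a CUDA device because call()
# places its buffers on device 0:
#
#     model = L2PartNew(*get_init_inputs()[0], **get_init_inputs()[1]).cuda()
#     out = model(*[t.cuda() for t in get_inputs()])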
byeongjokim/LateTemporalModeling3DCNN_for_sign
L2Part
false
1,687
[ "MIT" ]
0
e3a802fcf91dc3930aea782464ee34d9b747d3ab
https://github.com/byeongjokim/LateTemporalModeling3DCNN_for_sign/tree/e3a802fcf91dc3930aea782464ee34d9b747d3ab