entry_point
stringlengths
1
65
original_triton_python_code
stringlengths
208
619k
optimised_triton_code
stringlengths
1.15k
275k
repo_name
stringlengths
7
115
module_name
stringlengths
1
65
synthetic
bool
1 class
uuid
int64
0
18.5k
licenses
listlengths
1
6
stars
int64
0
19.8k
sha
stringlengths
40
40
repo_link
stringlengths
72
180
PyramidUp
import torch import torch.nn as nn from torch.nn import functional as F class PyramidUp(nn.Module): def __init__(self) ->None: super(PyramidUp, self).__init__() self.filter = nn.Parameter(torch.tensor([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]], dtype=torch.float).reshape(1, 1, 5, 5) / 256, requires_grad=False) def forward(self, x: 'torch.Tensor') ->torch.Tensor: upsample = F.interpolate(x, scale_factor=2) results = [] for i in range(x.shape[1]): results.append(F.conv2d(upsample[:, i:i + 1, :, :], self.filter, padding=2)) return torch.cat(results, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 % 4 x0 = xindex % 64 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp9 & xmask, eviction_policy ='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + (x0 + 64 * 
x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x3, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 5, 5), (25, 25, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 8, 8), (256, 0, 8, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 8, 8), (64, 64, 8, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 8, 8), (256, 64, 8, 1), 64), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 8, 8), (64, 64, 8, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 8, 8), (256, 64, 8, 1), 128), arg1_1, stride=(1, 1), padding=(2, 2 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 8, 8), (64, 64, 8, 1)) buf4 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 8, 8), (256, 64, 8, 1), 192), arg1_1, stride=(1, 1), padding=(2, 2 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 8, 8), (64, 64, 8, 1)) del arg1_1 buf5 = buf0 del buf0 triton_poi_fused_cat_1[grid(1024)](buf1, buf2, buf3, buf4, buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf2 del buf3 del buf4 return buf5, class PyramidUpNew(nn.Module): def __init__(self) ->None: super(PyramidUpNew, self).__init__() 
self.filter = nn.Parameter(torch.tensor([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]], dtype=torch.float).reshape(1, 1, 5, 5) / 256, requires_grad=False) def forward(self, input_0): arg1_1 = self.filter arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
masanorihirano/pytorch_extra_mhirano
PyramidUp
false
7,171
[ "MIT" ]
1
d19e07445567c069793b7ca1a22a846d7cbce58d
https://github.com/masanorihirano/pytorch_extra_mhirano/tree/d19e07445567c069793b7ca1a22a846d7cbce58d
ComprehensionLayer_step2
import math import torch import torch.nn as nn class ScaledDotProductAttention(nn.Module): def __init__(self, dropout=0.0): super(ScaledDotProductAttention, self).__init__() self.dropout = nn.Dropout(dropout) def forward(self, query, key, value): assert query.size()[-1] == key.size()[-1] dim = query.size()[-1] tmp_raw_scores = torch.div(torch.matmul(query, key.transpose(-2, -1 )), math.sqrt(dim)) atte_weights = torch.softmax(tmp_raw_scores, dim=-1) atte_weights = self.dropout(atte_weights) output = torch.matmul(atte_weights, value) return output, atte_weights class MultiHeadAttention(nn.Module): def __init__(self, embedding_dim, reduced_dim, n_head, dropout=0.0, eps =1e-08): super(MultiHeadAttention, self).__init__() assert reduced_dim % n_head == 0 self.n_head = n_head self.embedding_dim = embedding_dim self.reduced_dim = reduced_dim self.Wq = nn.Linear(embedding_dim, reduced_dim, bias=False) self.Wk = nn.Linear(embedding_dim, reduced_dim, bias=False) self.Wv = nn.Linear(embedding_dim, reduced_dim, bias=False) self.inner_attention = ScaledDotProductAttention(dropout) self.Wo = nn.Linear(reduced_dim, embedding_dim, bias=False) self.dropout = nn.Dropout(dropout) self.ln = nn.LayerNorm(embedding_dim, eps=eps) def forward(self, query): residual = query value = key = query query = self.Wq(query) key = self.Wk(key) value = self.Wv(value) b, n, _ = query.size() query = query.reshape(b, n, self.n_head, self.reduced_dim // self. n_head) b, m, _ = key.size() key = key.reshape(b, m, self.n_head, self.reduced_dim // self.n_head) value = value.reshape(b, m, self.n_head, self.reduced_dim // self. 
n_head) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) query, atte_weights = self.inner_attention(query, key, value) query = query.transpose(1, 2).reshape(b, n, self.reduced_dim) query = self.dropout(self.Wo(query)) query = query + residual query = self.ln(query) return query, atte_weights class ComprehensionLayer_step2(MultiHeadAttention): def __init__(self, embedding_dim, reduced_dim, n_head, dropout=0.0, eps =1e-08): super(ComprehensionLayer_step2, self).__init__(embedding_dim, reduced_dim, n_head, dropout) del self.ln self.mid_ln = nn.LayerNorm(embedding_dim, eps=eps) self.hig_ln = nn.LayerNorm(embedding_dim, eps=eps) def forward(self, low_vectors, mid_vectors, hig_vectors): b = low_vectors.size()[0] low_num, mid_num, hig_num = low_vectors.size()[1], mid_vectors.size()[1 ], hig_vectors.size()[1] mid_residual = mid_vectors hig_residual = hig_vectors query = self.Wq(torch.cat((mid_vectors, hig_vectors), dim=1)) key = self.Wk(torch.cat((low_vectors, mid_vectors), dim=1)) value = self.Wv(torch.cat((low_vectors, mid_vectors), dim=1)) mid_query, hig_query = torch.split(query, [mid_num, hig_num], dim=1) low_key, mid_key = torch.split(key, [low_num, mid_num], dim=1) low_value, mid_value = torch.split(value, [low_num, mid_num], dim=1) low_key = low_key.reshape(b, low_num, self.n_head, self.reduced_dim // self.n_head) low_value = low_value.reshape(b, low_num, self.n_head, self. reduced_dim // self.n_head) low_key = low_key.transpose(1, 2) low_value = low_value.transpose(1, 2) mid_query = mid_query.reshape(b, mid_num, self.n_head, self. reduced_dim // self.n_head) mid_key = mid_key.reshape(b, mid_num, self.n_head, self.reduced_dim // self.n_head) mid_value = mid_value.reshape(b, mid_num, self.n_head, self. reduced_dim // self.n_head) mid_query = mid_query.transpose(1, 2) mid_key = mid_key.transpose(1, 2) mid_value = mid_value.transpose(1, 2) hig_query = hig_query.reshape(b, hig_num, self.n_head, self. 
reduced_dim // self.n_head) hig_query = hig_query.transpose(1, 2) mid_query, mid_low_weights = self.inner_attention(mid_query, low_key, low_value) hig_query, hig_mid_weights = self.inner_attention(hig_query, mid_key, mid_value) mid_query = mid_query.transpose(1, 2).reshape(b, mid_num, self. reduced_dim) hig_query = hig_query.transpose(1, 2).reshape(b, hig_num, self. reduced_dim) output = self.dropout(self.Wo(torch.cat((mid_query, hig_query), dim=1)) ) mid_vectors, hig_vectors = torch.split(output, [mid_num, hig_num], dim=1) mid_vectors = mid_residual + mid_vectors hig_vectors = hig_residual + hig_vectors mid_vectors = self.mid_ln(mid_vectors) hig_vectors = self.hig_ln(hig_vectors) return mid_vectors, hig_vectors, mid_low_weights, hig_mid_weights def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'embedding_dim': 4, 'reduced_dim': 4, 'n_head': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 
tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (16 + y0 + 4 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x2 + x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x0 + 16 * x2 + (-4 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 32 * x1), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (16 + x0 + 32 * x1), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 
= tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_3, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((32, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (32, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((32, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (32, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) del primals_5 buf4 = empty_strided_cuda((32, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (32, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) del primals_6 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf1, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) 
triton_poi_fused_clone_1[grid(16, 4)](buf3, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused__softmax_3[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf4, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf1, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf1 buf13 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf3, buf13, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0) del buf8 extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf13, (16, 1, 4), (4, 0, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf14, buf15, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf14 triton_poi_fused__softmax_3[grid(256)](buf15, buf16, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf15 buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), 
torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf4, buf17, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 0), 0), out=buf18) buf19 = reinterpret_tensor(buf4, (4, 8, 4), (32, 4, 1), 0) del buf4 triton_poi_fused_cat_5[grid(128)](buf11, buf18, buf19, 128, XBLOCK= 128, num_warps=4, num_stages=1) buf20 = buf3 del buf3 extern_kernels.mm(reinterpret_tensor(buf19, (32, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf20) buf21 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_6[grid(64)](primals_2, buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf22 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_add_7[grid(64)](primals_3, buf20, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf20 del primals_3 buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_8[grid(16)](buf21, buf23, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(64)](buf21, buf23, buf24, primals_8, primals_9, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf26 = buf24 del buf24 buf27 = buf23 del buf23 triton_poi_fused_native_layer_norm_8[grid(16)](buf22, buf26, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(64)](buf22, buf26, buf27, primals_10, primals_11, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf26 del buf27 del primals_11 return (buf25, buf28, buf9, buf16, primals_8, primals_10, reinterpret_tensor(buf0, (32, 4), (4, 1), 0), reinterpret_tensor( 
buf2, (32, 4), (4, 1), 0), buf9, buf16, reinterpret_tensor(buf19, ( 32, 4), (4, 1), 0), buf21, buf22, primals_7, reinterpret_tensor( buf17, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf13, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0)) class ScaledDotProductAttention(nn.Module): def __init__(self, dropout=0.0): super(ScaledDotProductAttention, self).__init__() self.dropout = nn.Dropout(dropout) def forward(self, query, key, value): assert query.size()[-1] == key.size()[-1] dim = query.size()[-1] tmp_raw_scores = torch.div(torch.matmul(query, key.transpose(-2, -1 )), math.sqrt(dim)) atte_weights = torch.softmax(tmp_raw_scores, dim=-1) atte_weights = self.dropout(atte_weights) output = torch.matmul(atte_weights, value) return output, atte_weights class MultiHeadAttention(nn.Module): def __init__(self, embedding_dim, reduced_dim, n_head, dropout=0.0, eps =1e-08): super(MultiHeadAttention, self).__init__() assert reduced_dim % n_head == 0 self.n_head = n_head self.embedding_dim = embedding_dim self.reduced_dim = reduced_dim self.Wq = nn.Linear(embedding_dim, reduced_dim, bias=False) self.Wk = nn.Linear(embedding_dim, reduced_dim, bias=False) self.Wv = nn.Linear(embedding_dim, reduced_dim, bias=False) self.inner_attention = ScaledDotProductAttention(dropout) self.Wo = nn.Linear(reduced_dim, embedding_dim, bias=False) self.dropout = nn.Dropout(dropout) self.ln = nn.LayerNorm(embedding_dim, eps=eps) def forward(self, query): residual = query value = key = query query = self.Wq(query) key = self.Wk(key) value = self.Wv(value) b, n, _ = query.size() query = query.reshape(b, n, self.n_head, self.reduced_dim // self. n_head) b, m, _ = key.size() key = key.reshape(b, m, self.n_head, self.reduced_dim // self.n_head) value = value.reshape(b, m, self.n_head, self.reduced_dim // self. 
n_head) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) query, atte_weights = self.inner_attention(query, key, value) query = query.transpose(1, 2).reshape(b, n, self.reduced_dim) query = self.dropout(self.Wo(query)) query = query + residual query = self.ln(query) return query, atte_weights class ComprehensionLayer_step2New(MultiHeadAttention): def __init__(self, embedding_dim, reduced_dim, n_head, dropout=0.0, eps =1e-08): super(ComprehensionLayer_step2New, self).__init__(embedding_dim, reduced_dim, n_head, dropout) del self.ln self.mid_ln = nn.LayerNorm(embedding_dim, eps=eps) self.hig_ln = nn.LayerNorm(embedding_dim, eps=eps) def forward(self, input_0, input_1, input_2): primals_4 = self.Wq.weight primals_5 = self.Wk.weight primals_6 = self.Wv.weight primals_7 = self.Wo.weight primals_8 = self.mid_ln.weight primals_9 = self.mid_ln.bias primals_10 = self.hig_ln.weight primals_11 = self.hig_ln.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2], output[3]
luyu-fan/LRCM
ComprehensionLayer_step2
false
7,172
[ "MIT" ]
1
6b0e4d7998bc4969afa764eb753077e3f858f1ba
https://github.com/luyu-fan/LRCM/tree/6b0e4d7998bc4969afa764eb753077e3f858f1ba
ClassHead
import torch import torch.nn as nn class ClassHead(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(ClassHead, self).__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) self.output_act = nn.LogSoftmax(dim=-1) def forward(self, x): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1) b, h, w, _c = out.shape out = out.view(b, h, w, self.num_anchors, 2) out = self.output_act(out) return out.contiguous().view(out.shape[0], -1, 2) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = xindex % 6 x5 = xindex // 2 x1 = xindex // 2 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 2 * x5, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + 2 * x1, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 2 * x5), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 2 * x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp5, tmp8) tmp10 = tmp2 - tmp9 tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def 
triton_poi_fused__log_softmax__log_softmax_backward_data_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + 2 * x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tl.store(out_ptr0 + x2, tmp7, None) tl.store(out_ptr1 + x2, tmp8, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6)) buf2 = empty_strided_cuda((4, 64, 64, 3, 2), (24576, 384, 6, 2, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(98304)](buf1, primals_2, buf2, 98304, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = reinterpret_tensor(buf1, (4, 64, 64, 3, 2), (24576, 384, 6, 2, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 64, 64, 3, 2), (24576, 384, 6, 2, 1), torch.float32) triton_poi_fused__log_softmax__log_softmax_backward_data_2[grid(98304) ](buf2, buf3, buf4, 98304, XBLOCK=512, num_warps=8, num_stages=1) del buf2 return reinterpret_tensor(buf3, (4, 12288, 2), (24576, 2, 1), 0 ), 
primals_1, buf0, buf4 class ClassHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(ClassHeadNew, self).__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) self.output_act = nn.LogSoftmax(dim=-1) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lurenjia307/RetinaPedestrian_Pytorch
ClassHead
false
7,173
[ "MIT" ]
1
59c4aa50f3ef2ecb1113ad3b9950e8bbbff1206f
https://github.com/lurenjia307/RetinaPedestrian_Pytorch/tree/59c4aa50f3ef2ecb1113ad3b9950e8bbbff1206f
LaplacianPyramidLayer
import torch from typing import Tuple import torch.nn as nn from torch.nn import functional as F class PyramidDown(nn.Module): def __init__(self) ->None: super(PyramidDown, self).__init__() self.filter = nn.Parameter(torch.tensor([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]], dtype=torch.float).reshape(1, 1, 5, 5) / 256, requires_grad=False) def forward(self, x: 'torch.Tensor') ->torch.Tensor: results = [] for i in range(x.shape[1]): results.append(F.conv2d(x[:, i:i + 1, :, :], self.filter, padding=2, stride=2)) return torch.cat(results, dim=1) class PyramidUp(nn.Module): def __init__(self) ->None: super(PyramidUp, self).__init__() self.filter = nn.Parameter(torch.tensor([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]], dtype=torch.float).reshape(1, 1, 5, 5) / 256, requires_grad=False) def forward(self, x: 'torch.Tensor') ->torch.Tensor: upsample = F.interpolate(x, scale_factor=2) results = [] for i in range(x.shape[1]): results.append(F.conv2d(upsample[:, i:i + 1, :, :], self.filter, padding=2)) return torch.cat(results, dim=1) class LaplacianPyramidLayer(nn.Module): def __init__(self) ->None: super(LaplacianPyramidLayer, self).__init__() self.pyramid_down = PyramidDown() self.pyramid_up = PyramidUp() def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: y = x if x.shape[-1] % 2 != 0: y = torch.cat([y, torch.zeros(y.shape[:-1]).unsqueeze(dim=-1)], dim=-1) if x.shape[-2] % 2 != 0: y = y.transpose(-1, -2) y = torch.cat([y, torch.zeros(y.shape[:-1]).unsqueeze(dim=-1)], dim=-1) y = y.transpose(-1, -2) down: 'torch.Tensor' = self.pyramid_down(y) remade: 'torch.Tensor' = self.pyramid_up(down) diff: 'torch.Tensor' = y - remade if x.shape[-1] % 2 != 0: diff = diff[:, :, :, :-1] if x.shape[-1] % 2 != 0: diff = diff[:, :, :-1, :] return diff, down, remade def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy ='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask, eviction_policy ='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x3, tmp22, xmask) @triton.jit def triton_poi_fused__unsafe_index_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 2 * tmp4 + 4 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_cat_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp23 = tl.load(in_ptr4 + x3, xmask) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp9 & xmask, eviction_policy ='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + (x0 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp24 = tmp23 - tmp22 tl.store(out_ptr0 + x3, tmp22, xmask) tl.store(out_ptr1 + x3, tmp24, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(arg2_1, (1, 1, 5, 5), (25, 
25, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 16, 4, 1), 0), arg1_1, stride=(2, 2), padding=(2, 2 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 2, 2), (4, 4, 2, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 16, 4, 1), 16), arg1_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 2, 2), (4, 4, 2, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 16, 4, 1), 32), arg1_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 2, 2), (4, 4, 2, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 16, 4, 1), 48), arg1_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 2, 2), (4, 4, 2, 1)) del arg1_1 buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(64)](buf0, buf1, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del buf2 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__unsafe_index_1[grid(256)](buf4, buf5, 256, XBLOCK =128, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1)) buf7 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1, 4, 4), (64, 16, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1)) buf8 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1, 4, 4), (64, 16, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 4, 4), (16, 16, 4, 1)) buf9 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1, 4, 4), (64, 16, 4, 1), 48), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 1, 4, 4), (16, 16, 4, 1)) del arg2_1 buf10 = buf5 del buf5 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_cat_sub_2[grid(256)](buf6, buf7, buf8, buf9, arg0_1, buf10, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf6 del buf7 del buf8 del buf9 return buf11, buf4, buf10 class PyramidDown(nn.Module): def __init__(self) ->None: super(PyramidDown, self).__init__() self.filter = nn.Parameter(torch.tensor([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]], dtype=torch.float).reshape(1, 1, 5, 5) / 256, requires_grad=False) def forward(self, x: 'torch.Tensor') ->torch.Tensor: results = [] for i in range(x.shape[1]): results.append(F.conv2d(x[:, i:i + 1, :, :], self.filter, padding=2, stride=2)) return torch.cat(results, dim=1) class PyramidUp(nn.Module): def __init__(self) ->None: super(PyramidUp, self).__init__() self.filter = nn.Parameter(torch.tensor([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]], dtype=torch.float).reshape(1, 1, 5, 5) / 256, requires_grad=False) def forward(self, x: 'torch.Tensor') ->torch.Tensor: upsample = F.interpolate(x, scale_factor=2) results = [] for i in range(x.shape[1]): results.append(F.conv2d(upsample[:, i:i + 1, :, :], self.filter, padding=2)) return torch.cat(results, dim=1) class 
LaplacianPyramidLayerNew(nn.Module): def __init__(self) ->None: super(LaplacianPyramidLayerNew, self).__init__() self.pyramid_down = PyramidDown() self.pyramid_up = PyramidUp() def forward(self, input_0): arg1_1 = self.pyramid_down.filter arg2_1 = self.pyramid_up.filter arg0_1 = input_0 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1], output[2]
masanorihirano/pytorch_extra_mhirano
LaplacianPyramidLayer
false
7,174
[ "MIT" ]
1
d19e07445567c069793b7ca1a22a846d7cbce58d
https://github.com/masanorihirano/pytorch_extra_mhirano/tree/d19e07445567c069793b7ca1a22a846d7cbce58d
ActorNet
import torch import torch.nn as nn import torch.nn.functional as F class ActorNet(nn.Module): def __init__(self): super(ActorNet, self).__init__() self.fc1 = nn.Linear(4, 20) self.fc2 = nn.Linear(20, 40) self.fc3 = nn.Linear(40, 50) self.fc4 = nn.Linear(50, 30) self.fc5 = nn.Linear(30, 12) self.fc6 = nn.Linear(12, 2) def forward(self, x): x = self.fc1(x) x = F.leaky_relu(self.fc2(x)) x = F.leaky_relu(self.fc3(x)) x = F.leaky_relu(self.fc4(x)) x = F.leaky_relu(self.fc5(x)) x = self.fc6(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 2560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 40 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 30 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * 
tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (20, 4), (4, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (40, 20), (20, 1)) assert_size_stride(primals_5, (40,), (1,)) assert_size_stride(primals_6, (50, 40), (40, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (30, 50), (50, 1)) assert_size_stride(primals_9, (30,), (1,)) assert_size_stride(primals_10, (12, 30), (30, 1)) assert_size_stride(primals_11, (12,), (1,)) assert_size_stride(primals_12, (2, 12), (12, 1)) assert_size_stride(primals_13, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 40), (40, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (20, 40), (1, 20), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 40), (640, 160, 40, 1), torch.bool) buf3 = 
empty_strided_cuda((4, 4, 4, 40), (640, 160, 40, 1), torch. float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(2560)](buf1, primals_5, buf2, buf3, 2560, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_5 buf4 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 40), (40, 1), 0), reinterpret_tensor(primals_6, (40, 50), (1, 40), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool) buf6 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch. float32) triton_poi_fused_leaky_relu_1[grid(3200)](buf4, primals_7, buf5, buf6, 3200, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_7 buf7 = empty_strided_cuda((64, 30), (30, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 50), (50, 1), 0), reinterpret_tensor(primals_8, (50, 30), (1, 50), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool) buf9 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch. float32) triton_poi_fused_leaky_relu_2[grid(1920)](buf7, primals_9, buf8, buf9, 1920, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del primals_9 buf10 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 30), (30, 1), 0), reinterpret_tensor(primals_10, (30, 12), (1, 30), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool) buf12 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. 
float32) triton_poi_fused_leaky_relu_3[grid(768)](buf10, primals_11, buf11, buf12, 768, XBLOCK=128, num_warps=4, num_stages=1) del buf10 del primals_11 buf13 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (64, 12), (12, 1), 0), reinterpret_tensor(primals_12, (12, 2), (1, 12), 0 ), alpha=1, beta=1, out=buf13) del primals_13 return reinterpret_tensor(buf13, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf2, reinterpret_tensor(buf3, (64, 40), (40, 1), 0 ), buf5, reinterpret_tensor(buf6, (64, 50), (50, 1), 0 ), buf8, reinterpret_tensor(buf9, (64, 30), (30, 1), 0 ), buf11, reinterpret_tensor(buf12, (64, 12), (12, 1), 0 ), primals_12, primals_10, primals_8, primals_6, primals_4 class ActorNetNew(nn.Module): def __init__(self): super(ActorNetNew, self).__init__() self.fc1 = nn.Linear(4, 20) self.fc2 = nn.Linear(20, 40) self.fc3 = nn.Linear(40, 50) self.fc4 = nn.Linear(50, 30) self.fc5 = nn.Linear(30, 12) self.fc6 = nn.Linear(12, 2) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_10 = self.fc5.weight primals_11 = self.fc5.bias primals_12 = self.fc6.weight primals_13 = self.fc6.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
mathildebadoual/RL_power_systems
ActorNet
false
7,175
[ "MIT" ]
1
825e60bad16129e0a0229d15af5110b26e0a1577
https://github.com/mathildebadoual/RL_power_systems/tree/825e60bad16129e0a0229d15af5110b26e0a1577
MyKernelTorch
import torch import torch.nn as nn class MyKernelTorch(nn.Module): def __init__(self, n_features: 'int'): super().__init__() self.dense1 = nn.Linear(n_features, 20) self.dense2 = nn.Linear(20, 2) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = nn.ReLU()(self.dense1(x)) return self.dense2(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (20, 4), (4, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 20), (20, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1, primals_2, buf3, 1280, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = 
empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 20), (20, 1), 0), reinterpret_tensor(primals_4, (20, 2), (1, 20), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 20), (20, 1), 0), primals_4, buf3 class MyKernelTorchNew(nn.Module): def __init__(self, n_features: 'int'): super().__init__() self.dense1 = nn.Linear(n_features, 20) self.dense2 = nn.Linear(20, 2) def forward(self, input_0): primals_1 = self.dense1.weight primals_2 = self.dense1.bias primals_4 = self.dense2.weight primals_5 = self.dense2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
maxpark/alibi-detect
MyKernelTorch
false
7,176
[ "Apache-2.0" ]
1
84384297a85764c18537aa1c8699c4ad040cf7cd
https://github.com/maxpark/alibi-detect/tree/84384297a85764c18537aa1c8699c4ad040cf7cd
ResidualConnection
import torch import torch.nn as nn class ResidualConnection(nn.Module): def __init__(self, *layers): super(ResidualConnection, self).__init__() self.layers = nn.Sequential(*layers) def forward(self, input): return (input + self.layers(input)) / 2.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ResidualConnectionNew(nn.Module): def __init__(self, *layers): super(ResidualConnectionNew, self).__init__() self.layers = nn.Sequential(*layers) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
maxkvant/LinearizedNNs
ResidualConnection
false
7,177
[ "Apache-2.0" ]
1
eb0198be70ca55e7463b97a5023d2f6ffe0f8ba6
https://github.com/maxkvant/LinearizedNNs/tree/eb0198be70ca55e7463b97a5023d2f6ffe0f8ba6
NormalizeImages
import torch import torch.nn as nn class NormalizeImages(nn.Module): def __init__(self): super().__init__() def forward(self, x): flat = x.view(x.size(0), -1) mp = torch.mean(flat, dim=1) sp = torch.std(flat, dim=1) + 1e-07 return (x - mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1). expand_as(x)) / sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(1 ).expand_as(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = tmp0 - tmp20 tmp22 = 63.0 tmp23 = tmp18 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-07 tmp26 = tmp24 + tmp25 tmp27 = tmp21 / tmp26 tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mean_std_sub_0[grid(4)](arg0_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf4, class NormalizeImagesNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 
= input_0 output = call([arg0_1]) return output[0]
matteo-ronchetti/IKA
NormalizeImages
false
7,178
[ "MIT" ]
1
29d1752a059c3ab7659b332b72bf8c1506e7dd20
https://github.com/matteo-ronchetti/IKA/tree/29d1752a059c3ab7659b332b72bf8c1506e7dd20
SoftmaxAttention
import torch import torch.nn as nn def masked_softmax(tensor, mask): """ Apply a masked softmax on the last dimension of a tensor. The input tensor and mask should be of size (batch, *, sequence_length). Args: tensor: The tensor on which the softmax function must be applied along the last dimension. mask: A mask of the same size as the tensor with 0s in the positions of the values that must be masked and 1s everywhere else. Returns: A tensor of the same size as the inputs containing the result of the softmax. """ tensor_shape = tensor.size() reshaped_tensor = tensor.view(-1, tensor_shape[-1]) while mask.dim() < tensor.dim(): mask = mask.unsqueeze(1) mask = mask.expand_as(tensor).contiguous().float() reshaped_mask = mask.view(-1, mask.size()[-1]) result = nn.functional.softmax(reshaped_tensor * reshaped_mask, dim=-1) result = result * reshaped_mask result = result / (result.sum(dim=-1, keepdim=True) + 1e-13) return result.view(*tensor_shape) def weighted_sum(tensor, weights, mask): """ Apply a weighted sum on the vectors along the last dimension of 'tensor', and mask the vectors in the result with 'mask'. Args: tensor: A tensor of vectors on which a weighted sum must be applied. weights: The weights to use in the weighted sum. mask: A mask to apply on the result of the weighted sum. Returns: A new tensor containing the result of the weighted sum after the mask has been applied on it. """ weighted_sum = weights.bmm(tensor) while mask.dim() < weighted_sum.dim(): mask = mask.unsqueeze(1) mask = mask.transpose(-1, -2) mask = mask.expand_as(weighted_sum).contiguous().float() return weighted_sum * mask class SoftmaxAttention(nn.Module): """ Attention layer taking premises and hypotheses encoded by an RNN as input and computing the soft attention between their elements. The dot product of the encoded vectors in the premises and hypotheses is first computed. 
The softmax of the result is then used in a weighted sum of the vectors of the premises for each element of the hypotheses, and conversely for the elements of the premises. """ def forward(self, premise_batch, premise_mask, hypothesis_batch, hypothesis_mask): """ Args: premise_batch: A batch of sequences of vectors representing the premises in some NLI task. The batch is assumed to have the size (batch, sequences, vector_dim). premise_mask: A mask for the sequences in the premise batch, to ignore padding data in the sequences during the computation of the attention. hypothesis_batch: A batch of sequences of vectors representing the hypotheses in some NLI task. The batch is assumed to have the size (batch, sequences, vector_dim). hypothesis_mask: A mask for the sequences in the hypotheses batch, to ignore padding data in the sequences during the computation of the attention. Returns: attended_premises: The sequences of attention vectors for the premises in the input batch. attended_hypotheses: The sequences of attention vectors for the hypotheses in the input batch. prem_hyp_attn: TODO hyp_prem_attn: TODO """ similarity_matrix = premise_batch.bmm(hypothesis_batch.transpose(2, 1).contiguous()) prem_hyp_attn = masked_softmax(similarity_matrix, hypothesis_mask) hyp_prem_attn = masked_softmax(similarity_matrix.transpose(1, 2). contiguous(), premise_mask) attended_premises = weighted_sum(hypothesis_batch, prem_hyp_attn, premise_mask) attended_hypotheses = weighted_sum(premise_batch, hyp_prem_attn, hypothesis_mask) return (attended_premises, attended_hypotheses, prem_hyp_attn, hyp_prem_attn) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4] ), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * (x0 // 4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = 
tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 / tmp25 tmp27 = tmp26 * tmp1 tmp28 = tmp18 / tmp25 tmp29 = tmp28 * tmp4 tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp25 tmp32 = tmp31 * tmp8 tmp33 = tmp30 + tmp32 tmp34 = tmp24 / tmp25 tmp35 = tmp34 * tmp12 tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x0, tmp14, xmask) tl.store(out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr2 + x0, tmp36, xmask) @triton.jit def triton_poi_fused__softmax_add_div_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * (x1 // 4)), xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = tmp7 * tmp1 tmp10 = 1e-13 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_clone_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * (x0 // 4) + x0 % 4), xmask) tmp1 = tl.load(in_ptr1 + 4 * (x0 // 4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (4 + 16 * (x0 // 4) + x0 % 4), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + 16 * (x0 // 4) + x0 % 4), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + 16 * (x0 // 4) + x0 % 4), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 / tmp25 tmp27 = tmp26 * tmp1 tmp28 = tmp18 / tmp25 tmp29 = tmp28 * tmp4 tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp25 tmp32 = tmp31 * tmp8 tmp33 = tmp30 + tmp32 tmp34 = tmp24 / tmp25 tmp35 = tmp34 * tmp12 tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x0, tmp14, xmask) tl.store(out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr2 + x0, tmp36, xmask) @triton.jit def 
triton_poi_fused__softmax_add_div_mul_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 4 * (y0 // 4)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = tmp7 * tmp1 tmp10 = 1e-13 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, buf0, out=buf1) buf2 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf3 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf4 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused__softmax_mul_sum_1[grid(16)](buf1, arg2_1, buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 
triton_poi_fused__softmax_add_div_mul_2[grid(64)](buf1, arg2_1, buf2, buf3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), arg1_1, out=buf6) del arg1_1 buf7 = buf6 del buf6 triton_poi_fused_clone_mul_3[grid(64)](buf7, arg3_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf4 del buf4 buf9 = buf3 del buf3 buf10 = buf2 del buf2 triton_poi_fused__softmax_mul_sum_4[grid(16)](buf1, arg3_1, buf8, buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_div_mul_5[grid(16, 4)](buf1, arg3_1, buf8, buf9, buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=8, num_warps= 1, num_stages=1) del arg3_1 del buf10 del buf8 del buf9 buf12 = buf1 del buf1 extern_kernels.bmm(reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), arg0_1, out=buf12) del arg0_1 buf13 = buf12 del buf12 triton_poi_fused_clone_mul_3[grid(64)](buf13, arg2_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg2_1 return buf7, buf13, reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) def masked_softmax(tensor, mask): """ Apply a masked softmax on the last dimension of a tensor. The input tensor and mask should be of size (batch, *, sequence_length). Args: tensor: The tensor on which the softmax function must be applied along the last dimension. mask: A mask of the same size as the tensor with 0s in the positions of the values that must be masked and 1s everywhere else. Returns: A tensor of the same size as the inputs containing the result of the softmax. 
""" tensor_shape = tensor.size() reshaped_tensor = tensor.view(-1, tensor_shape[-1]) while mask.dim() < tensor.dim(): mask = mask.unsqueeze(1) mask = mask.expand_as(tensor).contiguous().float() reshaped_mask = mask.view(-1, mask.size()[-1]) result = nn.functional.softmax(reshaped_tensor * reshaped_mask, dim=-1) result = result * reshaped_mask result = result / (result.sum(dim=-1, keepdim=True) + 1e-13) return result.view(*tensor_shape) def weighted_sum(tensor, weights, mask): """ Apply a weighted sum on the vectors along the last dimension of 'tensor', and mask the vectors in the result with 'mask'. Args: tensor: A tensor of vectors on which a weighted sum must be applied. weights: The weights to use in the weighted sum. mask: A mask to apply on the result of the weighted sum. Returns: A new tensor containing the result of the weighted sum after the mask has been applied on it. """ weighted_sum = weights.bmm(tensor) while mask.dim() < weighted_sum.dim(): mask = mask.unsqueeze(1) mask = mask.transpose(-1, -2) mask = mask.expand_as(weighted_sum).contiguous().float() return weighted_sum * mask class SoftmaxAttentionNew(nn.Module): """ Attention layer taking premises and hypotheses encoded by an RNN as input and computing the soft attention between their elements. The dot product of the encoded vectors in the premises and hypotheses is first computed. The softmax of the result is then used in a weighted sum of the vectors of the premises for each element of the hypotheses, and conversely for the elements of the premises. """ def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg2_1 = input_1 arg1_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1], output[2], output[3]
marvosyntactical/fs2018ex3viz
SoftmaxAttention
false
7,179
[ "Apache-2.0" ]
1
9002133a45b52c596efa91d842f691fe1f066a6c
https://github.com/marvosyntactical/fs2018ex3viz/tree/9002133a45b52c596efa91d842f691fe1f066a6c
_leaky_relu
import torch from torch import nn class _leaky_relu(nn.Module): def __init__(self): super(_leaky_relu, self).__init__() def forward(self, x): x_neg = 0.1 * x return torch.max(x_neg, x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_maximum_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1 tmp2 = tmp0 * tmp1 tmp3 = triton_helpers.maximum(tmp2, tmp0) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_maximum_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class _leaky_reluNew(nn.Module): def __init__(self): super(_leaky_reluNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
maxuanquang/SfmLearner-Redesign
_leaky_relu
false
7,180
[ "MIT" ]
1
0250a9cc443b5754ba45f69153a03ca26f903a7b
https://github.com/maxuanquang/SfmLearner-Redesign/tree/0250a9cc443b5754ba45f69153a03ca26f903a7b
CriticNet
import torch import torch.nn as nn import torch.nn.functional as F class CriticNet(nn.Module): def __init__(self): super(CriticNet, self).__init__() self.fc1 = nn.Linear(4, 20) self.fc2 = nn.Linear(20, 40) self.fc3 = nn.Linear(40, 30) self.fc4 = nn.Linear(30, 8) self.fc5 = nn.Linear(8, 1) def forward(self, x): x = self.fc1(x) x = F.leaky_relu(self.fc2(x)) x = F.leaky_relu(self.fc3(x)) x = F.leaky_relu(self.fc4(x)) x = self.fc5(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 2560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 40 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 30 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 
tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (20, 4), (4, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (40, 20), (20, 1)) assert_size_stride(primals_5, (40,), (1,)) assert_size_stride(primals_6, (30, 40), (40, 1)) assert_size_stride(primals_7, (30,), (1,)) assert_size_stride(primals_8, (8, 30), (30, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (1, 8), (8, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 40), (40, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (20, 40), (1, 20), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 40), (640, 160, 40, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 40), (640, 160, 40, 1), torch. float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(2560)](buf1, primals_5, buf2, buf3, 2560, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_5 buf4 = empty_strided_cuda((64, 30), (30, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 40), (40, 1), 0), reinterpret_tensor(primals_6, (40, 30), (1, 40), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool) buf6 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch. 
float32) triton_poi_fused_leaky_relu_1[grid(1920)](buf4, primals_7, buf5, buf6, 1920, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_7 buf7 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 30), (30, 1), 0), reinterpret_tensor(primals_8, (30, 8), (1, 30), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_leaky_relu_2[grid(512)](buf7, primals_9, buf8, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del primals_9 buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (64, 8), (8, 1), 0), reinterpret_tensor(primals_10, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf2, reinterpret_tensor(buf3, (64, 40), (40, 1), 0 ), buf5, reinterpret_tensor(buf6, (64, 30), (30, 1), 0 ), buf8, reinterpret_tensor(buf9, (64, 8), (8, 1), 0 ), primals_10, primals_8, primals_6, primals_4 class CriticNetNew(nn.Module): def __init__(self): super(CriticNetNew, self).__init__() self.fc1 = nn.Linear(4, 20) self.fc2 = nn.Linear(20, 40) self.fc3 = nn.Linear(40, 30) self.fc4 = nn.Linear(30, 8) self.fc5 = nn.Linear(8, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_10 = self.fc5.weight primals_11 = self.fc5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
mathildebadoual/RL_power_systems
CriticNet
false
7,181
[ "MIT" ]
1
825e60bad16129e0a0229d15af5110b26e0a1577
https://github.com/mathildebadoual/RL_power_systems/tree/825e60bad16129e0a0229d15af5110b26e0a1577
ZeroConv2d
import torch from torch import nn from torch.nn import functional as F class ZeroConv2d(nn.Module): def __init__(self, in_channel, out_channel, padding=1): super().__init__() self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) def forward(self, input): out = F.pad(input, [1, 1, 1, 1], value=1) out = self.conv(out) out = out * torch.exp(self.scale * 3) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=1.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_exp_mul_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 
1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_exp_mul_1[grid(256)](buf2, primals_3, primals_4, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf3, primals_2, primals_4, buf0, buf2 class ZeroConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, padding=1): super().__init__() self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) def forward(self, input_0): primals_4 = self.scale primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
mbaddar1/glow-pytorch
ZeroConv2d
false
7,182
[ "MIT" ]
1
e07ca542ce4dd93ddf680c51eda25d1f9db252a1
https://github.com/mbaddar1/glow-pytorch/tree/e07ca542ce4dd93ddf680c51eda25d1f9db252a1
BasicGraphConvolutionLayer
import torch from torch.nn.parameter import Parameter class BasicGraphConvolutionLayer(torch.nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.W2 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.W1 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.bias = Parameter(torch.zeros(out_channels, dtype=torch.float32)) def forward(self, X, A): potential_msgs = torch.mm(X, self.W2) propagated_msgs = torch.mm(A, potential_msgs) root_update = torch.mm(X, self.W1) output = propagated_msgs + root_update + self.bias return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels._mm_plus_mm(primals_3, buf0, primals_2, primals_4, out=buf1) del buf0 del primals_4 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_0[grid(16)](buf2, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0) class BasicGraphConvolutionLayerNew(torch.nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.W2 = 
Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.W1 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.bias = Parameter(torch.zeros(out_channels, dtype=torch.float32)) def forward(self, input_0, input_1): primals_1 = self.W2 primals_2 = self.W1 primals_5 = self.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mbrukman/machine-learning-book
BasicGraphConvolutionLayer
false
7,183
[ "MIT" ]
1
f29a0f8aafa63a77081f3bcec68866e33dd41776
https://github.com/mbrukman/machine-learning-book/tree/f29a0f8aafa63a77081f3bcec68866e33dd41776
InvConv2d
import torch from torch import nn from torch.nn import functional as F class InvConv2d(nn.Module): def __init__(self, in_channel): super().__init__() weight = torch.randn(in_channel, in_channel) q, _ = torch.qr(weight) weight = q.unsqueeze(2).unsqueeze(3) self.weight = nn.Parameter(weight) def forward(self, input): _, _, height, width = input.shape out = F.conv2d(input, self.weight) logdet = height * width * torch.slogdet(self.weight.squeeze().double() )[1].float() return out, logdet def reverse(self, output): return F.conv2d(output, self.weight.squeeze().inverse().unsqueeze(2 ).unsqueeze(3)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tmp1.to(tl.float32) tmp3 = 16.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (1, 4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(4, 4)](primals_2, buf0, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(primals_1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) del buf0 buf2 = empty_strided_cuda((4, 4), (1, 4), torch.float64) triton_poi_fused__to_copy_1[grid(16)](primals_2, buf2, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf3 = torch.ops.aten._linalg_slogdet.default(buf2) del buf2 buf5 = buf3[1] buf6 = buf3[2] buf7 = buf3[3] del buf3 buf8 = empty_strided_cuda((), (), torch.float32) triton_poi_fused__to_copy_mul_2[grid(1)](buf5, buf8, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf5 return buf1, buf8, primals_1, primals_2, buf6, buf7 class InvConv2dNew(nn.Module): def __init__(self, in_channel): super().__init__() weight = torch.randn(in_channel, in_channel) q, _ = torch.qr(weight) weight = q.unsqueeze(2).unsqueeze(3) self.weight = nn.Parameter(weight) def reverse(self, output): return F.conv2d(output, self.weight.squeeze().inverse().unsqueeze(2 ).unsqueeze(3)) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
mbaddar1/glow-pytorch
InvConv2d
false
7,184
[ "MIT" ]
1
e07ca542ce4dd93ddf680c51eda25d1f9db252a1
https://github.com/mbaddar1/glow-pytorch/tree/e07ca542ce4dd93ddf680c51eda25d1f9db252a1
ScaledDotProductAttention
import torch import torch.optim.lr_scheduler import torch.nn as nn class ScaledDotProductAttention(nn.Module): def __init__(self, d_model, attention_dropout=0.1): super(ScaledDotProductAttention, self).__init__() self.temper = d_model ** 0.5 self.dropout = nn.Dropout(attention_dropout) self.softmax = nn.Softmax(dim=-1) def forward(self, q, k, v, attn_mask=None): attn = torch.bmm(q, k.transpose(1, 2)) / self.temper if attn_mask is not None: assert attn_mask.size() == attn.size( ), 'Attention mask shape {} mismatch with Attention logit tensor shape {}.'.format( attn_mask.size(), attn.size()) attn.data.masked_fill_(attn_mask, -float('inf')) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.optim.lr_scheduler import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), 
xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class ScaledDotProductAttentionNew(nn.Module): def __init__(self, d_model, attention_dropout=0.1): super(ScaledDotProductAttentionNew, self).__init__() self.temper = d_model ** 0.5 self.dropout = nn.Dropout(attention_dropout) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
mcoavoux/self-attentive-parser
ScaledDotProductAttention
false
7,185
[ "MIT" ]
1
fa5814ecfdbf4fde329ea725e1d2ddaa55f247d6
https://github.com/mcoavoux/self-attentive-parser/tree/fa5814ecfdbf4fde329ea725e1d2ddaa55f247d6
LayerNorm
import torch import torch.multiprocessing from torch import nn from torch.nn import functional as F import torch.optim import torch.utils.data import torch.distributed class LayerNorm(nn.Module): def __init__(self, channels: 'int', eps: 'float'=1e-05): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): x = x.transpose(1, -1) x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) return x.transpose(1, -1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.multiprocessing from torch import nn import torch.optim import torch.utils.data import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y2 = yindex // 16 y4 = yindex % 16 y5 = yindex y0 = yindex % 4 y1 = yindex // 4 % 4 tmp0 = tl.load(in_ptr0 + (y4 + 16 * x3 + 64 * y2), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y5, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y5, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x3 + 4 * y1 + 16 * y0 + 64 * y2), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64, 4)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 4, 16), 0), primals_1 class LayerNormNew(nn.Module): def __init__(self, channels: 'int', eps: 'float'=1e-05): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def 
forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mbarnig/vits-train
LayerNorm
false
7,186
[ "MIT" ]
1
cfb8a0fc91daad868fe3d062ebf85d62edbd7506
https://github.com/mbarnig/vits-train/tree/cfb8a0fc91daad868fe3d062ebf85d62edbd7506
AvgPoolShortening
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class AvgPoolShortening(Module): """ ### Average pool shortening This down-samples by a given factor with average pooling """ def __init__(self, k: 'int'): """ * `k` is the shortening factor """ super().__init__() self.pool = nn.AvgPool1d(k, ceil_mode=True) def forward(self, x: 'torch.Tensor'): """ * `x` is of shape `[seq_len, batch_size, d_model]` """ return self.pool(x.permute(1, 2, 0)).permute(2, 0, 1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'k': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (1, 4, 4), (1, 4, 1), 0), class AvgPoolShorteningNew(Module): """ ### Average pool shortening This down-samples by a given factor with average pooling """ def __init__(self, k: 'int'): """ * `k` is the shortening factor """ super().__init__() self.pool = nn.AvgPool1d(k, ceil_mode=True) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
AvgPoolShortening
false
7,187
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
AttentionNet
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.functional def conv3x3(in_, out): return nn.Conv2d(in_, out, 3, padding=1) class ConvRelu(nn.Module): def __init__(self, in_, out): super().__init__() self.conv = conv3x3(in_, out) self.activation = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.activation(x) return x class ConvRelu2(nn.Module): def __init__(self, _in, _out): super(ConvRelu2, self).__init__() self.cr1 = ConvRelu(_in, _out) self.cr2 = ConvRelu(_out, _out) def forward(self, x): x = self.cr1(x) x = self.cr2(x) return x class Coder(nn.Module): def __init__(self, in_size, out_size): super(Coder, self).__init__() self.conv = ConvRelu2(in_size, out_size) self.down = nn.MaxPool2d(2, 2) def forward(self, x): y1 = self.conv(x) y2 = self.down(y1) return y2, y1 class Decoder(nn.Module): def __init__(self, in_size, out_size): super(Decoder, self).__init__() self.conv = ConvRelu2(in_size, out_size) self.up = F.interpolate def forward(self, x1, x2): x2 = self.up(x2, scale_factor=2, mode='bilinear', align_corners=False) return self.conv(torch.cat([x1, x2], 1)) class AttentionNet(nn.Module): def __init__(self, in_channels=3, out_channels=1): super(AttentionNet, self).__init__() self.in_channels = in_channels self.out_channels = out_channels filters = [64, 128, 256] self.down1 = Coder(in_channels, filters[0]) self.down2 = Coder(filters[0], filters[1]) self.center = ConvRelu2(filters[1], filters[2]) self.up2 = Decoder(filters[2] + filters[1], filters[1]) self.up1 = Decoder(filters[1] + filters[0], filters[0]) self.final = nn.Conv2d(filters[0], out_channels, 1) def forward(self, x): x, befdown1 = self.down1(x) x, befdown2 = self.down2(x) x = self.center(x) x = self.up2(befdown2, x) x = self.up1(befdown1, x) x = self.final(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, 
tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = 
tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 15, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, 
out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x5 = xindex // 1024 x2 = xindex // 1024 % 256 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 16 * tmp4 + 256 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 16 * tmp28 + 256 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 16 * tmp28 + 256 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tl.store(out_ptr0 + x6, tmp24, None) tl.store(out_ptr1 + x6, tmp40, None) @triton.jit def 
triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 384 x0 = xindex % 1024 x2 = xindex // 393216 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 131072 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 384, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 1024 * (-128 + x1) + 262144 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (x0 + 1024 * (-128 + x1) + 262144 * x2), tmp6, other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x5 = xindex // 4096 x2 = xindex // 4096 % 128 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 32 * tmp4 + 1024 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 
tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 32 * tmp28 + 1024 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 32 * tmp28 + 1024 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tl.store(out_ptr0 + x6, tmp24, None) tl.store(out_ptr1 + x6, tmp40, None) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 192 x0 = xindex % 4096 x2 = xindex // 786432 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 192, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 524288 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (x0 + 4096 * (-64 + x1) + 524288 * x2), tmp6, other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (128, 384, 3, 3), (3456, 9, 3, 
1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (64, 192, 3, 3), (1728, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_21, (64,), (1,)) assert_size_stride(primals_22, (1, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_23, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = 
extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1)) buf15 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_5[grid(32)](buf15, 32, XBLOCK=32, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_6[grid(32)](buf16, 32, XBLOCK=32, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_5[grid(32)](buf17, 32, XBLOCK=32, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_6[grid(32)](buf18, 32, XBLOCK=32, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(32)](buf19, 32, XBLOCK=32, num_warps=1, 
num_stages=1) buf21 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(32)](buf21, 32, XBLOCK=32, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((4, 256, 32, 32), (262144, 1024, 32, 1), torch.float32) buf22 = empty_strided_cuda((4, 256, 32, 32), (262144, 1024, 32, 1), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_8[grid( 1048576)](buf15, buf17, buf14, primals_13, buf18, buf19, buf16, buf21, buf20, buf22, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf23 = empty_strided_cuda((4, 384, 32, 32), (393216, 1024, 32, 1), torch.float32) triton_poi_fused_cat_9[grid(1572864)](buf9, buf20, buf22, buf23, 1572864, XBLOCK=1024, num_warps=4, num_stages=1) del buf20 del buf22 buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_2[grid(524288)](buf25, primals_15, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_10[grid(64)](buf27, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_11[grid(64)](buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_10[grid(64)](buf29, 64, XBLOCK=64, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_11[grid(64)](buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) buf31 = empty_strided_cuda((64,), 
(1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(64)](buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(64)](buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) buf32 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf34 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13[grid (2097152)](buf27, buf29, buf26, primals_17, buf30, buf31, buf28, buf33, buf32, buf34, 2097152, XBLOCK=512, num_warps=8, num_stages=1 ) buf35 = empty_strided_cuda((4, 192, 64, 64), (786432, 4096, 64, 1), torch.float32) triton_poi_fused_cat_14[grid(3145728)](buf3, buf32, buf34, buf35, 3145728, XBLOCK=1024, num_warps=4, num_stages=1) del buf32 del buf34 buf36 = extern_kernels.convolution(buf35, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_0[grid(1048576)](buf37, primals_19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf38 = extern_kernels.convolution(buf37, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_0[grid(1048576)](buf39, primals_21, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf40 = extern_kernels.convolution(buf39, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf41 = buf40 del buf40 triton_poi_fused_convolution_15[grid(16384)](buf41, primals_23, 16384, 
XBLOCK=256, num_warps=4, num_stages=1) del primals_23 buf42 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_16[grid(524288)]( buf26, primals_17, buf42, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf26 del primals_17 buf43 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_17[grid(262144)]( buf14, primals_13, buf43, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf14 del primals_13 return (buf41, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf18, buf19, buf21, buf23, buf25, buf27, buf28, buf29, buf30, buf31, buf33, buf35, buf37, buf39, buf42, buf43) def conv3x3(in_, out): return nn.Conv2d(in_, out, 3, padding=1) class ConvRelu(nn.Module): def __init__(self, in_, out): super().__init__() self.conv = conv3x3(in_, out) self.activation = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.activation(x) return x class ConvRelu2(nn.Module): def __init__(self, _in, _out): super(ConvRelu2, self).__init__() self.cr1 = ConvRelu(_in, _out) self.cr2 = ConvRelu(_out, _out) def forward(self, x): x = self.cr1(x) x = self.cr2(x) return x class Coder(nn.Module): def __init__(self, in_size, out_size): super(Coder, self).__init__() self.conv = ConvRelu2(in_size, out_size) self.down = nn.MaxPool2d(2, 2) def forward(self, x): y1 = self.conv(x) y2 = self.down(y1) return y2, y1 class Decoder(nn.Module): def __init__(self, in_size, out_size): super(Decoder, self).__init__() self.conv = ConvRelu2(in_size, out_size) self.up = F.interpolate def forward(self, x1, x2): x2 = self.up(x2, scale_factor=2, mode='bilinear', align_corners=False) return self.conv(torch.cat([x1, x2], 1)) class AttentionNetNew(nn.Module): def __init__(self, 
in_channels=3, out_channels=1): super(AttentionNetNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels filters = [64, 128, 256] self.down1 = Coder(in_channels, filters[0]) self.down2 = Coder(filters[0], filters[1]) self.center = ConvRelu2(filters[1], filters[2]) self.up2 = Decoder(filters[2] + filters[1], filters[1]) self.up1 = Decoder(filters[1] + filters[0], filters[0]) self.final = nn.Conv2d(filters[0], out_channels, 1) def forward(self, input_0): primals_1 = self.down1.conv.cr1.conv.weight primals_2 = self.down1.conv.cr1.conv.bias primals_4 = self.down1.conv.cr2.conv.weight primals_5 = self.down1.conv.cr2.conv.bias primals_6 = self.down2.conv.cr1.conv.weight primals_7 = self.down2.conv.cr1.conv.bias primals_8 = self.down2.conv.cr2.conv.weight primals_9 = self.down2.conv.cr2.conv.bias primals_10 = self.center.cr1.conv.weight primals_11 = self.center.cr1.conv.bias primals_12 = self.center.cr2.conv.weight primals_13 = self.center.cr2.conv.bias primals_14 = self.up2.conv.cr1.conv.weight primals_15 = self.up2.conv.cr1.conv.bias primals_16 = self.up2.conv.cr2.conv.weight primals_17 = self.up2.conv.cr2.conv.bias primals_18 = self.up1.conv.cr1.conv.weight primals_19 = self.up1.conv.cr1.conv.bias primals_20 = self.up1.conv.cr2.conv.weight primals_21 = self.up1.conv.cr2.conv.bias primals_22 = self.final.weight primals_23 = self.final.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0]
lvxiuwang/ferattention
AttentionNet
false
7,188
[ "MIT" ]
1
02e97df4a12129ed6706bddf0d2109650eae8765
https://github.com/lvxiuwang/ferattention/tree/02e97df4a12129ed6706bddf0d2109650eae8765
MaxPool3x3
import torch import torch.nn as nn import torch.utils.data class MaxPool3x3(nn.Module): """3x3 max pool with no subsampling.""" def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(MaxPool3x3, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, stride, padding) def forward(self, x): x = self.maxpool(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 
& tmp9 tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tl.store(out_ptr0 + x4, tmp51, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MaxPool3x3New(nn.Module): """3x3 max pool with no subsampling.""" def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(MaxPool3x3New, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, stride, padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mc-nya/unnas
MaxPool3x3
false
7,189
[ "MIT" ]
1
f778bb743144cf56ce2a48ccca20e9f3a97a7b84
https://github.com/mc-nya/unnas/tree/f778bb743144cf56ce2a48ccca20e9f3a97a7b84
MultiHeadAttention
import math import torch import typing import torch.multiprocessing from torch import nn from torch.nn import functional as F import torch.optim import torch.utils.data import torch.distributed class MultiHeadAttention(nn.Module): def __init__(self, channels: 'int', out_channels: 'int', n_heads: 'int', p_dropout: 'float'=0.0, window_size: 'typing.Optional[int]'=None, heads_share: 'bool'=True, block_length: 'typing.Optional[int]'=None, proximal_bias: 'bool'=False, proximal_init: 'bool'=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.p_dropout = p_dropout self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.proximal_init = proximal_init self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) nn.init.xavier_uniform_(self.conv_v.weight) if proximal_init: with torch.no_grad(): self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, 
self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query / math.sqrt(self.k_channels), key. transpose(-2, -1)) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query / math.sqrt( self.k_channels), key_relative_embeddings) scores_local = self._relative_position_to_absolute_position( rel_logits) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s).type_as(scores ) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: assert t_s == t_t, 'Local attention is only available for self-attention.' block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores.masked_fill(block_mask == 0, -10000.0) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. 
emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, (0, 0, pad_length, pad_length, 0, 0)) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, (0, 1, 0, 0, 0, 0, 0, 0)) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, (0, length - 1, 0, 0, 0, 0)) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, (0, length - 1, 0, 0, 0, 0, 0, 0)) x_flat = x.view([batch, heads, length * length + length * (length - 1)] ) x_flat = F.pad(x_flat, (length, 0, 0, 0, 0, 0)) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. 
Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import typing import torch.multiprocessing from torch import nn from torch.nn import functional as F import torch.optim import torch.utils.data import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = 
extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf3, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = buf2 del buf2 triton_poi_fused_convolution_1[grid(64)](buf8, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_1[grid(64)](buf11, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del 
primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class MultiHeadAttentionNew(nn.Module): def __init__(self, channels: 'int', out_channels: 'int', n_heads: 'int', p_dropout: 'float'=0.0, window_size: 'typing.Optional[int]'=None, heads_share: 'bool'=True, block_length: 'typing.Optional[int]'=None, proximal_bias: 'bool'=False, proximal_init: 'bool'=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.p_dropout = p_dropout self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.proximal_init = proximal_init self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) nn.init.xavier_uniform_(self.conv_v.weight) if proximal_init: with torch.no_grad(): self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, 
self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query / math.sqrt(self.k_channels), key. transpose(-2, -1)) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query / math.sqrt( self.k_channels), key_relative_embeddings) scores_local = self._relative_position_to_absolute_position( rel_logits) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s).type_as(scores ) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: assert t_s == t_t, 'Local attention is only available for self-attention.' block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores.masked_fill(block_mask == 0, -10000.0) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. 
emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, (0, 0, pad_length, pad_length, 0, 0)) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, (0, 1, 0, 0, 0, 0, 0, 0)) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, (0, length - 1, 0, 0, 0, 0)) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, (0, length - 1, 0, 0, 0, 0, 0, 0)) x_flat = x.view([batch, heads, length * length + length * (length - 1)] ) x_flat = F.pad(x_flat, (length, 0, 0, 0, 0, 0)) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. 
Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def forward(self, input_0, input_1): primals_1 = self.conv_q.weight primals_2 = self.conv_q.bias primals_4 = self.conv_k.weight primals_5 = self.conv_k.bias primals_7 = self.conv_v.weight primals_8 = self.conv_v.bias primals_9 = self.conv_o.weight primals_10 = self.conv_o.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
mbarnig/vits-train
MultiHeadAttention
false
7,190
[ "MIT" ]
1
cfb8a0fc91daad868fe3d062ebf85d62edbd7506
https://github.com/mbarnig/vits-train/tree/cfb8a0fc91daad868fe3d062ebf85d62edbd7506
ChannelNorm
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class ChannelNorm(Module): """ ## Channel Normalization This is similar to [Group Normalization](../group_norm/index.html) but affine transform is done group wise. """ def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool' =True): """ * `groups` is the number of groups the features are divided into * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() self.channels = channels self.groups = groups self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(groups)) self.shift = nn.Parameter(torch.zeros(groups)) def forward(self, x: 'torch.Tensor'): """ `x` is a tensor of shape `[batch_size, channels, *]`. `*` denotes any number of (possibly 0) dimensions. For example, in an image (2D) convolution this will be `[batch_size, channels, height, width]` """ x_shape = x.shape batch_size = x_shape[0] assert self.channels == x.shape[1] x = x.view(batch_size, self.groups, -1) mean = x.mean(dim=[-1], keepdim=True) mean_x2 = (x ** 2).mean(dim=[-1], keepdim=True) var = mean_x2 - mean ** 2 x_norm = (x - mean) / torch.sqrt(var + self.eps) if self.affine: x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1, -1, 1) return x_norm.view(x_shape) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + 0) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp23 = tl.load(in_ptr2 + 0) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp20 = tmp0 - tmp11 tmp21 = tmp20 / tmp17 tmp22 = tmp19 * tmp21 tmp25 = tmp22 + tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp25, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 1, 64), (64, 64, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, buf1, buf3 class ChannelNormNew(Module): """ ## Channel Normalization This is similar to [Group Normalization](../group_norm/index.html) but affine transform is done group wise. """ def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool' =True): """ * `groups` is the number of groups the features are divided into * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() self.channels = channels self.groups = groups self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(groups)) self.shift = nn.Parameter(torch.zeros(groups)) def forward(self, input_0): primals_2 = self.scale primals_3 = self.shift primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mcx/annotated_deep_learning_paper_implementations
ChannelNorm
false
7,191
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
NodeNetwork
import torch import torch.nn.functional as F from torch.nn.parameter import Parameter def global_sum_pool(X, batch_mat): if batch_mat is None or batch_mat.dim() == 1: return torch.sum(X, dim=0).unsqueeze(0) else: return torch.mm(batch_mat, X) class BasicGraphConvolutionLayer(torch.nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.W2 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.W1 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.bias = Parameter(torch.zeros(out_channels, dtype=torch.float32)) def forward(self, X, A): potential_msgs = torch.mm(X, self.W2) propagated_msgs = torch.mm(A, potential_msgs) root_update = torch.mm(X, self.W1) output = propagated_msgs + root_update + self.bias return output class NodeNetwork(torch.nn.Module): def __init__(self, input_features): super().__init__() self.conv_1 = BasicGraphConvolutionLayer(input_features, 32) self.conv_2 = BasicGraphConvolutionLayer(32, 32) self.fc_1 = torch.nn.Linear(32, 16) self.out_layer = torch.nn.Linear(16, 2) def forward(self, X, A, batch_mat): x = self.conv_1(X, A).clamp(0) x = self.conv_2(x, A).clamp(0) output = global_sum_pool(x, batch_mat) output = self.fc_1(output) output = self.out_layer(output) return F.softmax(output, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_add_clamp_ge_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp4 >= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = 
tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 32), (32, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32), (32, 1)) assert_size_stride(primals_7, (32, 32), (32, 1)) assert_size_stride(primals_8, (32,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (16, 32), (32, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (2, 16), (16, 1)) assert_size_stride(primals_13, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels._mm_plus_mm(primals_3, buf0, primals_2, primals_4, out=buf1) del primals_4 buf2 = buf0 del buf0 buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_clamp_ge_0[grid(128)](buf1, primals_5, buf2, buf12, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf3 = buf1 del buf1 extern_kernels.mm(buf2, primals_6, out=buf3) buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3 del buf3 extern_kernels.mm(buf2, primals_7, out=buf5) buf6 = 
empty_strided_cuda((4, 32), (32, 1), torch.float32) buf11 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_add_clamp_ge_1[grid(128)](buf4, buf5, primals_8, buf6, buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_8 buf7 = buf5 del buf5 extern_kernels.mm(primals_9, buf6, out=buf7) del buf6 buf8 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_11, buf7, reinterpret_tensor( primals_10, (32, 16), (1, 32), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_13, buf8, reinterpret_tensor( primals_12, (16, 2), (1, 16), 0), alpha=1, beta=1, out=buf9) del primals_13 buf10 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused__softmax_2[grid(8)](buf9, buf10, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf9 return (buf10, buf7, buf8, buf10, primals_12, primals_10, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), buf11, reinterpret_tensor(buf2, (32, 4), (1, 32), 0), reinterpret_tensor( primals_7, (32, 32), (1, 32), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_6, (32, 32), (1, 32), 0), buf12, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)) def global_sum_pool(X, batch_mat): if batch_mat is None or batch_mat.dim() == 1: return torch.sum(X, dim=0).unsqueeze(0) else: return torch.mm(batch_mat, X) class BasicGraphConvolutionLayer(torch.nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.W2 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.W1 = Parameter(torch.rand((in_channels, out_channels), dtype= torch.float32)) self.bias = Parameter(torch.zeros(out_channels, dtype=torch.float32)) def forward(self, X, A): potential_msgs = torch.mm(X, self.W2) propagated_msgs = torch.mm(A, potential_msgs) root_update = torch.mm(X, self.W1) output = propagated_msgs + 
root_update + self.bias return output class NodeNetworkNew(torch.nn.Module): def __init__(self, input_features): super().__init__() self.conv_1 = BasicGraphConvolutionLayer(input_features, 32) self.conv_2 = BasicGraphConvolutionLayer(32, 32) self.fc_1 = torch.nn.Linear(32, 16) self.out_layer = torch.nn.Linear(16, 2) def forward(self, input_0, input_1, input_2): primals_1 = self.conv_1.W2 primals_4 = self.conv_1.W1 primals_5 = self.conv_1.bias primals_6 = self.conv_2.W2 primals_7 = self.conv_2.W1 primals_8 = self.conv_2.bias primals_10 = self.fc_1.weight primals_11 = self.fc_1.bias primals_12 = self.out_layer.weight primals_13 = self.out_layer.bias primals_2 = input_0 primals_3 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
mbrukman/machine-learning-book
NodeNetwork
false
7,192
[ "MIT" ]
1
f29a0f8aafa63a77081f3bcec68866e33dd41776
https://github.com/mbrukman/machine-learning-book/tree/f29a0f8aafa63a77081f3bcec68866e33dd41776
FFN
import torch import typing import torch.multiprocessing from torch import nn from torch.nn import functional as F import torch.optim import torch.utils.data import torch.distributed class FFN(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', filter_channels: 'int', kernel_size: 'int', p_dropout: 'float'=0.0, activation: 'typing.Optional[str]'=None, causal: 'bool'=False): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.activation = activation self.causal = causal if causal: self.padding = self._causal_padding else: self.padding = self._same_padding self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) self.drop = nn.Dropout(p_dropout) def forward(self, x, x_mask): x = self.conv_1(self.padding(x * x_mask)) if self.activation == 'gelu': x = x * torch.sigmoid(1.702 * x) else: x = torch.relu(x) x = self.drop(x) x = self.conv_2(self.padding(x * x_mask)) return x * x_mask def _causal_padding(self, x): if self.kernel_size == 1: return x pad_l = self.kernel_size - 1 pad_r = 0 x = F.pad(x, (pad_l, pad_r, 0, 0, 0, 0)) return x def _same_padding(self, x): if self.kernel_size == 1: return x pad_l = (self.kernel_size - 1) // 2 pad_r = self.kernel_size // 2 x = F.pad(x, (pad_l, pad_r, 0, 0, 0, 0)) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'filter_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import typing import torch.multiprocessing from torch import nn from torch.nn import functional as F import torch.optim import torch.utils.data import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_mul_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x3 = xindex // 7 x1 = xindex // 7 % 4 x4 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x3), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x1, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = 
tl.load(in_ptr2 + (-1 + x0 + 4 * x3), tmp5 & xmask, other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp5, tmp12, tmp13) tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_mul_0[grid(112)](primals_1, primals_2, buf0, 112, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,), padding=(0,), dilation=(1,), 
transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_mul_relu_1[grid(112)](buf1 , primals_4, primals_2, buf2, 112, XBLOCK=128, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_mul_2[grid(64)](buf4, primals_6, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(64)](buf1, primals_4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_4 return buf4, primals_2, primals_3, primals_5, buf0, buf2, buf5 class FFNNew(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', filter_channels: 'int', kernel_size: 'int', p_dropout: 'float'=0.0, activation: 'typing.Optional[str]'=None, causal: 'bool'=False): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.activation = activation self.causal = causal if causal: self.padding = self._causal_padding else: self.padding = self._same_padding self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) self.drop = nn.Dropout(p_dropout) def _causal_padding(self, x): if self.kernel_size == 1: return x pad_l = self.kernel_size - 1 pad_r = 0 x = F.pad(x, (pad_l, pad_r, 0, 0, 0, 0)) return x def _same_padding(self, x): if self.kernel_size == 1: return x pad_l = (self.kernel_size - 1) // 2 pad_r = self.kernel_size // 2 x = F.pad(x, (pad_l, pad_r, 0, 0, 0, 0)) return x def 
forward(self, input_0, input_1): primals_1 = self.conv_1.weight primals_4 = self.conv_1.bias primals_2 = self.conv_2.weight primals_6 = self.conv_2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
mbarnig/vits-train
FFN
false
7,193
[ "MIT" ]
1
cfb8a0fc91daad868fe3d062ebf85d62edbd7506
https://github.com/mbarnig/vits-train/tree/cfb8a0fc91daad868fe3d062ebf85d62edbd7506
DiscriminatorLoss
from torch.nn import Module import torch import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class DiscriminatorLoss(Module): """ ## Discriminator Loss We want to find $w$ to maximize $$\\mathbb{E}_{x \\sim \\mathbb{P}_r} [f_w(x)]- \\mathbb{E}_{z \\sim p(z)} [f_w(g_ heta(z))]$$, so we minimize, $$- rac{1}{m} \\sum_{i=1}^m f_w ig(x^{(i)} ig) + rac{1}{m} \\sum_{i=1}^m f_w ig( g_ heta(z^{(i)}) ig)$$ """ def forward(self, f_real: 'torch.Tensor', f_fake: 'torch.Tensor'): """ * `f_real` is $f_w(x)$ * `f_fake` is $f_w(g_ heta(z))$ This returns the a tuple with losses for $f_w(x)$ and $f_w(g_ heta(z))$, which are later added. They are kept separate for logging. """ return F.relu(1 - f_real).mean(), F.relu(1 + f_fake).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_relu_rsub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) @triton.jit def triton_per_fused_add_mean_relu_1(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_relu_rsub_0[grid(1)](buf2, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 triton_per_fused_add_mean_relu_1[grid(1)](buf3, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 return buf2, buf3 class DiscriminatorLossNew(Module): """ ## Discriminator Loss We want to find $w$ to maximize $$\\mathbb{E}_{x \\sim \\mathbb{P}_r} [f_w(x)]- \\mathbb{E}_{z \\sim p(z)} [f_w(g_ heta(z))]$$, so we minimize, $$- rac{1}{m} \\sum_{i=1}^m f_w ig(x^{(i)} ig) + rac{1}{m} \\sum_{i=1}^m f_w ig( g_ heta(z^{(i)}) ig)$$ """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
mcx/annotated_deep_learning_paper_implementations
DiscriminatorLoss
false
7,194
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
Model
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(Model, self).__init__() self.layer1 = nn.Linear(input_size, hidden_size) self.layer2 = nn.Linear(hidden_size, output_size) def forward(self, x): x = self.layer1(x) x = nn.Sigmoid()(x) x = self.layer2(x) x = nn.Softmax(dim=1)(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 
), buf1, buf4, primals_4 class ModelNew(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(ModelNew, self).__init__() self.layer1 = nn.Linear(input_size, hidden_size) self.layer2 = nn.Linear(hidden_size, output_size) def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mbrukman/machine-learning-book
Model
false
7,195
[ "MIT" ]
1
f29a0f8aafa63a77081f3bcec68866e33dd41776
https://github.com/mbrukman/machine-learning-book/tree/f29a0f8aafa63a77081f3bcec68866e33dd41776
ClippedValueFunctionLoss
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class ClippedValueFunctionLoss(Module): """ ## Clipped Value Function Loss Similarly we clip the value function update also. egin{align} V^{\\pi_ heta}_{CLIP}(s_t) &= clip\\Bigl(V^{\\pi_ heta}(s_t) - \\hat{V_t}, -\\epsilon, +\\epsilon\\Bigr) \\ \\mathcal{L}^{VF}( heta) &= rac{1}{2} \\mathbb{E} iggl[ max\\Bigl(igl(V^{\\pi_ heta}(s_t) - R_tigr)^2, igl(V^{\\pi_ heta}_{CLIP}(s_t) - R_tigr)^2\\Bigr) iggr] \\end{align} Clipping makes sure the value function $V_ heta$ doesn't deviate significantly from $V_{ heta_{OLD}}$. """ def forward(self, value: 'torch.Tensor', sampled_value: 'torch.Tensor', sampled_return: 'torch.Tensor', clip: 'float'): clipped_value = sampled_value + (value - sampled_value).clamp(min=- clip, max=clip) vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value - sampled_return) ** 2) return 0.5 * vf_loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp6 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp0 - tmp4 tmp7 = -tmp6 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp9 = triton_helpers.minimum(tmp8, tmp6) tmp10 = tmp4 + tmp9 tmp11 = tmp10 - tmp1 tmp12 = tmp11 * tmp11 tmp13 = triton_helpers.maximum(tmp3, tmp12) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tmp19 = 0.5 tmp20 = tmp18 * tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0[grid(1)](buf1 , 
arg0_1, arg3_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1 ) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf1, class ClippedValueFunctionLossNew(Module): """ ## Clipped Value Function Loss Similarly we clip the value function update also. egin{align} V^{\\pi_ heta}_{CLIP}(s_t) &= clip\\Bigl(V^{\\pi_ heta}(s_t) - \\hat{V_t}, -\\epsilon, +\\epsilon\\Bigr) \\ \\mathcal{L}^{VF}( heta) &= rac{1}{2} \\mathbb{E} iggl[ max\\Bigl(igl(V^{\\pi_ heta}(s_t) - R_tigr)^2, igl(V^{\\pi_ heta}_{CLIP}(s_t) - R_tigr)^2\\Bigr) iggr] \\end{align} Clipping makes sure the value function $V_ heta$ doesn't deviate significantly from $V_{ heta_{OLD}}$. """ def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
ClippedValueFunctionLoss
false
7,196
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
CrossEntropyBayesRisk
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class CrossEntropyBayesRisk(Module): """ <a id="CrossEntropyBayesRisk"></a> ## Bayes Risk with Cross Entropy Loss Bayes risk is the overall maximum cost of making incorrect estimates. It takes a cost function that gives the cost of making an incorrect estimate and sums it over all possible outcomes based on probability distribution. Here the cost function is cross-entropy loss, for one-hot coded $\\mathbf{y}$ $$\\sum_{k=1}^K -y_k \\log p_k$$ We integrate this cost over all $\\mathbf{p}$ egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K -y_k \\log p_k \\Big] rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K y_k igg( \\psi(S) - \\psi( extcolor{orange}{lpha_k} ) igg) \\end{align} where $\\psi(\\cdot)$ is the $digamma$ function. """ def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'): """ * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]` * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]` """ alpha = evidence + 1.0 strength = alpha.sum(dim=-1) loss = (target * (torch.digamma(strength)[:, None] - torch.digamma( alpha))).sum(dim=-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_mean_mul_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp2 = 
tl.load(in_ptr2 + 4 * r3, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr2 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + 4 * r0 + 16 * r2), None, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr2 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3 + 4 * r0 + 16 * r2), None, eviction_policy ='evict_last') tmp19 = tl.load(in_ptr2 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp8 = tmp6 - tmp7 tmp9 = tmp5 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp12 - tmp13 tmp15 = tmp11 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp18 - tmp19 tmp21 = tmp17 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = torch.ops.aten.digamma.default(buf0) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_1[grid(256)](arg0_1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf4 = torch.ops.aten.digamma.default(buf3) del buf3 buf5 = buf4 del buf4 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7 del 
buf7 triton_per_fused_mean_mul_sub_sum_2[grid(1)](buf8, arg1_1, buf2, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf2 del buf5 return buf8, class CrossEntropyBayesRiskNew(Module): """ <a id="CrossEntropyBayesRisk"></a> ## Bayes Risk with Cross Entropy Loss Bayes risk is the overall maximum cost of making incorrect estimates. It takes a cost function that gives the cost of making an incorrect estimate and sums it over all possible outcomes based on probability distribution. Here the cost function is cross-entropy loss, for one-hot coded $\\mathbf{y}$ $$\\sum_{k=1}^K -y_k \\log p_k$$ We integrate this cost over all $\\mathbf{p}$ egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K -y_k \\log p_k \\Big] rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K y_k igg( \\psi(S) - \\psi( extcolor{orange}{lpha_k} ) igg) \\end{align} where $\\psi(\\cdot)$ is the $digamma$ function. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
CrossEntropyBayesRisk
false
7,197
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
DPFP
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class DPFP(Module): """ ## Deterministic Parameter Free Project (DPFP) This is the new projection function $ extcolor{lightgreen}{\\phi}$ introduced in the paper. DPFP projects $k$ of dimensionality $d_{key}$ to dimensionality $d_{dot} = 2 d_{key} u$, where $ u \\in \\{1, 2, ..., 2 d_{key} - 1 \\}$ is a hyper-parameter. $$ extcolor{lightgreen}{\\phi_{2 d_{key} (i - 1) + j}(k)} = ext{ReLU}\\Big(ig[k, -kig]\\Big)_{j} ext{ReLU}\\Big(ig[k, -kig]\\Big)_{i + j}$$ where $ig[k, -kig]$ is the concatenation of $k$ and $-k$ to give a vector of size $2 d_{key}$, $i \\in \\{1, 2, ..., u \\}$, and $j \\in \\{1, 2, ..., 2 d_{key}\\}$. $x_i$ is the $i$-th element of vector $x$ and is rolled around if $i$ is larger than the number of elements in $x$. Basically, it creates a new vector by multiplying elements of $[k, -k]$ shifted by $i$. This produces projections that are sparse (only a few elements of $phi$ are non-zero) and orthogonal ($ extcolor{lightgreen}{\\phi(k^{(i)})} \\cdot extcolor{lightgreen}{\\phi(k^{(j)})} pprox 0$ for most $i, j$ unless $k^{(i)}$ and $k^{(j)}$ are very similar. ### Normalization Paper introduces a simple normalization for $ extcolor{lightgreen}{\\phi}$, $$ extcolor{lightgreen}{\\phi '(k)} = rac{ extcolor{lightgreen}{\\phi(k)}}{\\sum^{d_{dot}}_{j=1} extcolor{lightgreen}{\\phi(k)_j}}$$ *Check the paper for derivation.* """ def __init__(self, nu: 'int'=1, eps: 'float'=1e-06): """ * `nu` is the hyper-parameter $ u$. * `eps` is the small value used to make sure there is no division-by-zero when normalizing. 
""" super().__init__() self.nu = nu self.relu = nn.ReLU() self.eps = eps def forward(self, k: 'torch.Tensor'): k = self.dpfp(k) return k / (torch.sum(k, dim=-1, keepdim=True) + self.eps) def dpfp(self, k: 'torch.Tensor'): """ $$ extcolor{lightgreen}{\\phi(k)}$$ """ x = self.relu(torch.cat([k, -k], dim=-1)) x_rolled = [x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)] x_rolled = torch.cat(x_rolled, dim=-1) x_repeat = torch.cat([x] * self.nu, dim=-1) return x_repeat * x_rolled def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_cat_div_mul_relu_roll_sum_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + r1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * x0 + (-4 + r1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp14 = tl.full([1, 1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = (7 + r1) % 8 tmp18 = tmp16 < tmp3 tmp19 = tl.load(in_ptr0 + (4 * x0 + (7 + r1) % 8), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp16 >= tmp3 tmp22 = tl.load(in_ptr0 + (4 * x0 + (-4 + (7 + r1) % 8)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = -tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp20, tmp23, tmp24) tmp26 = tl.where(tmp18, tmp19, tmp25) tmp27 = triton_helpers.maximum(tmp14, tmp26) tmp28 = tmp15 * tmp27 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = 
tl.sum(tmp31, 1)[:, None] tmp33 = 1e-06 tmp34 = tmp32 + tmp33 tmp35 = tmp28 / tmp34 tl.store(out_ptr2 + (r1 + 8 * x0), tmp35, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_cat_div_mul_relu_roll_sum_0[grid(64)](arg0_1, buf2, 64, 8, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class DPFPNew(Module): """ ## Deterministic Parameter Free Project (DPFP) This is the new projection function $ extcolor{lightgreen}{\\phi}$ introduced in the paper. DPFP projects $k$ of dimensionality $d_{key}$ to dimensionality $d_{dot} = 2 d_{key} u$, where $ u \\in \\{1, 2, ..., 2 d_{key} - 1 \\}$ is a hyper-parameter. $$ extcolor{lightgreen}{\\phi_{2 d_{key} (i - 1) + j}(k)} = ext{ReLU}\\Big(ig[k, -kig]\\Big)_{j} ext{ReLU}\\Big(ig[k, -kig]\\Big)_{i + j}$$ where $ig[k, -kig]$ is the concatenation of $k$ and $-k$ to give a vector of size $2 d_{key}$, $i \\in \\{1, 2, ..., u \\}$, and $j \\in \\{1, 2, ..., 2 d_{key}\\}$. $x_i$ is the $i$-th element of vector $x$ and is rolled around if $i$ is larger than the number of elements in $x$. Basically, it creates a new vector by multiplying elements of $[k, -k]$ shifted by $i$. This produces projections that are sparse (only a few elements of $phi$ are non-zero) and orthogonal ($ extcolor{lightgreen}{\\phi(k^{(i)})} \\cdot extcolor{lightgreen}{\\phi(k^{(j)})} pprox 0$ for most $i, j$ unless $k^{(i)}$ and $k^{(j)}$ are very similar. ### Normalization Paper introduces a simple normalization for $ extcolor{lightgreen}{\\phi}$, $$ extcolor{lightgreen}{\\phi '(k)} = rac{ extcolor{lightgreen}{\\phi(k)}}{\\sum^{d_{dot}}_{j=1} extcolor{lightgreen}{\\phi(k)_j}}$$ *Check the paper for derivation.* """ def __init__(self, nu: 'int'=1, eps: 'float'=1e-06): """ * `nu` is the hyper-parameter $ u$. 
* `eps` is the small value used to make sure there is no division-by-zero when normalizing. """ super().__init__() self.nu = nu self.relu = nn.ReLU() self.eps = eps def dpfp(self, k: 'torch.Tensor'): """ $$ extcolor{lightgreen}{\\phi(k)}$$ """ x = self.relu(torch.cat([k, -k], dim=-1)) x_rolled = [x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)] x_rolled = torch.cat(x_rolled, dim=-1) x_repeat = torch.cat([x] * self.nu, dim=-1) return x_repeat * x_rolled def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
DPFP
false
7,198
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
KLDivLoss
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class KLDivLoss(Module): """ ## KL-Divergence loss This calculates the KL divergence between a given normal distribution and $\\mathcal{N}(0, 1)$ """ def forward(self, sigma_hat: 'torch.Tensor', mu: 'torch.Tensor'): return -0.5 * torch.mean(1 + sigma_hat - mu ** 2 - torch.exp(sigma_hat) ) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tmp13 = -0.5 tmp14 = tmp12 * tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_exp_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class KLDivLossNew(Module): """ ## KL-Divergence loss This calculates the KL divergence between a given normal distribution and $\\mathcal{N}(0, 1)$ """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = 
call([arg0_1, arg1_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
KLDivLoss
false
7,199
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
MaximumLikelihoodLoss
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class MaximumLikelihoodLoss(Module): """ <a id="MaximumLikelihoodLoss"></a> ## Type II Maximum Likelihood Loss The distribution $D(\\mathbf{p} ert extcolor{orange}{\\mathbf{lpha}})$ is a prior on the likelihood $Multi(\\mathbf{y} ert p)$, and the negative log marginal likelihood is calculated by integrating over class probabilities $\\mathbf{p}$. If target probabilities (one-hot targets) are $y_k$ for a given sample the loss is, egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\prod_{k=1}^K p_k^{y_k} rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K y_k igg( \\log S - \\log extcolor{orange}{lpha_k} igg) \\end{align} """ def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'): """ * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]` * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]` """ alpha = evidence + 1.0 strength = alpha.sum(dim=-1) loss = (target * (strength.log()[:, None] - alpha.log())).sum(dim=-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (16 * r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (1 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * r3, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (4 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (5 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (6 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (7 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (8 + 16 * r0 + 64 * r2), None, 
eviction_policy='evict_last') tmp41 = tl.load(in_ptr1 + (9 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr1 + (10 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr1 + (11 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr1 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp57 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr1 + (12 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr1 + (13 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp63 = tl.load(in_ptr1 + (14 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr1 + (15 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr1 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp5 = tmp4 + tmp2 tmp6 = tmp3 + tmp5 tmp8 = tmp7 + tmp2 tmp9 = tmp6 + tmp8 tmp11 = tmp10 + tmp2 tmp12 = tmp9 + tmp11 tmp13 = tl_math.log(tmp12) tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = tmp0 * tmp17 tmp21 = tmp20 + tmp2 tmp23 = tmp22 + tmp2 tmp24 = tmp21 + tmp23 tmp26 = tmp25 + tmp2 tmp27 = tmp24 + tmp26 tmp29 = tmp28 + tmp2 tmp30 = tmp27 + tmp29 tmp31 = tl_math.log(tmp30) tmp33 = tmp32 + tmp2 tmp34 = tl_math.log(tmp33) tmp35 = tmp31 - tmp34 tmp36 = tmp19 * tmp35 tmp37 = tmp18 + tmp36 tmp40 = tmp39 + tmp2 tmp42 = tmp41 + tmp2 tmp43 = tmp40 + tmp42 tmp45 = tmp44 + tmp2 tmp46 = tmp43 + tmp45 tmp48 = tmp47 + tmp2 tmp49 = tmp46 + tmp48 tmp50 = tl_math.log(tmp49) tmp52 = tmp51 + tmp2 tmp53 = tl_math.log(tmp52) tmp54 = tmp50 - tmp53 tmp55 = tmp38 * tmp54 tmp56 = tmp37 + tmp55 tmp59 = tmp58 + tmp2 tmp61 = tmp60 + tmp2 tmp62 = tmp59 + tmp61 tmp64 = tmp63 + tmp2 tmp65 = tmp62 + tmp64 tmp67 = tmp66 + tmp2 tmp68 = tmp65 + tmp67 tmp69 = tl_math.log(tmp68) tmp71 = tmp70 + tmp2 tmp72 = tl_math.log(tmp71) tmp73 = tmp69 - tmp72 tmp74 = tmp57 * tmp73 tmp75 = 
tmp56 + tmp74 tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK]) tmp78 = tl.sum(tmp76, 1)[:, None] tmp79 = 64.0 tmp80 = tmp78 / tmp79 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp80, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_log_mean_mul_sub_sum_0[grid(1)](buf2, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class MaximumLikelihoodLossNew(Module): """ <a id="MaximumLikelihoodLoss"></a> ## Type II Maximum Likelihood Loss The distribution $D(\\mathbf{p} ert extcolor{orange}{\\mathbf{lpha}})$ is a prior on the likelihood $Multi(\\mathbf{y} ert p)$, and the negative log marginal likelihood is calculated by integrating over class probabilities $\\mathbf{p}$. If target probabilities (one-hot targets) are $y_k$ for a given sample the loss is, egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\prod_{k=1}^K p_k^{y_k} rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K y_k igg( \\log S - \\log extcolor{orange}{lpha_k} igg) \\end{align} """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
MaximumLikelihoodLoss
false
7,200
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
FCVAE
import torch from torch.nn import functional as F from torch import nn class BaseVAE(nn.Module): """ Base abstract class for the Variational Autoencoders """ def __init__(self, channels=1, width=28, height=28, z_dim=2): """ Constructor Parameters: channels - The number of channels for the image width - The width of the image in pixels height - The height of the image in pixels z_dim - The dimension of the latent space """ super(BaseVAE, self).__init__() self.channels = channels self.width = width self.height = height self.z_dim = z_dim def getNbChannels(self): """ Returns the number of channels of the handled images """ return self.channels def getWidth(self): """ Returns the width of the handled images in pixels """ return self.width def getHeight(self): """ Returns the height of the handled images in pixels """ return self.height def getZDim(self): """ Returns the dimension of the latent space of the VAE """ return self.z_dim def flatten(self, x): """ Can be used to flatten the output image. This method will only handle images of the original size specified for the network """ return x.view(-1, self.channels * self.height * self.width) def unflatten(self, x): """ Can be used to unflatten an image handled by the network. 
This method will only handle images of the original size specified for the network """ return x.view(-1, self.channels, self.height, self.width) class FCVAE(BaseVAE): """ Fully connected Variational Autoencoder """ def __init__(self, channels=1, width=28, height=28, z_dim=2): super(FCVAE, self).__init__(channels, width, height, z_dim) self.fc1 = nn.Linear(self.channels * self.width * self.height, 400) self.fc21 = nn.Linear(400, self.z_dim) self.fc22 = nn.Linear(400, self.z_dim) self.fc3 = nn.Linear(self.z_dim, 400) self.fc4 = nn.Linear(400, self.channels * self.width * self.height) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(self.flatten(x)) z = self.reparameterize(mu, logvar) return self.unflatten(self.decode(z)), mu, logvar def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 
tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp3 * tmp5 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400,), (1,)) assert_size_stride(primals_4, (2, 400), (400, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (2, 400), (400, 1)) assert_size_stride(primals_7, (2,), (1,)) assert_size_stride(primals_8, (400, 2), (2, 1)) assert_size_stride(primals_9, (400,), (1,)) assert_size_stride(primals_10, (784, 400), (400, 1)) assert_size_stride(primals_11, (784,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 2), (1, 400), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 2), (1, 400), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = torch.ops.aten.randn.default([4, 2], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused_add_exp_mul_1[grid(8)](buf2, buf5, buf3, buf6, 8, XBLOCK=8, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_8, 
(2, 400), (1, 2), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_0[grid(1600)](buf8, primals_9, 1600, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf9) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 784), (784, 1), torch.float32) triton_poi_fused_sigmoid_sigmoid_backward_2[grid(3136)](buf10, primals_11, buf11, 3136, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 return (reinterpret_tensor(buf10, (4, 1, 28, 28), (784, 784, 28, 1), 0), buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf11, primals_10, primals_8, primals_6, primals_4) class BaseVAE(nn.Module): """ Base abstract class for the Variational Autoencoders """ def __init__(self, channels=1, width=28, height=28, z_dim=2): """ Constructor Parameters: channels - The number of channels for the image width - The width of the image in pixels height - The height of the image in pixels z_dim - The dimension of the latent space """ super(BaseVAE, self).__init__() self.channels = channels self.width = width self.height = height self.z_dim = z_dim def getNbChannels(self): """ Returns the number of channels of the handled images """ return self.channels def getWidth(self): """ Returns the width of the handled images in pixels """ return self.width def getHeight(self): """ Returns the height of the handled images in pixels """ return self.height def getZDim(self): """ Returns the dimension of the latent space of the VAE """ return self.z_dim def flatten(self, x): """ Can be used to flatten the output image. This method will only handle images of the original size specified for the network """ return x.view(-1, self.channels * self.height * self.width) def unflatten(self, x): """ Can be used to unflatten an image handled by the network. 
This method will only handle images of the original size specified for the network """ return x.view(-1, self.channels, self.height, self.width) class FCVAENew(BaseVAE): """ Fully connected Variational Autoencoder """ def __init__(self, channels=1, width=28, height=28, z_dim=2): super(FCVAENew, self).__init__(channels, width, height, z_dim) self.fc1 = nn.Linear(self.channels * self.width * self.height, 400) self.fc21 = nn.Linear(400, self.z_dim) self.fc22 = nn.Linear(400, self.z_dim) self.fc3 = nn.Linear(self.z_dim, 400) self.fc4 = nn.Linear(400, self.channels * self.width * self.height) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc21.weight primals_5 = self.fc21.bias primals_6 = self.fc22.weight primals_7 = self.fc22.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_10 = self.fc4.weight primals_11 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2]
mbusy/vae
FCVAE
false
7,201
[ "MIT" ]
1
455e382a557b72fc944460331e5dd010ff83a76a
https://github.com/mbusy/vae/tree/455e382a557b72fc944460331e5dd010ff83a76a
PatchEmbeddings
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class PatchEmbeddings(Module): """ <a id="PatchEmbeddings"></a> ## Get patch embeddings The paper splits the image into patches of equal size and do a linear transformation on the flattened pixels for each patch. We implement the same thing through a convolution layer, because it's simpler to implement. """ def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'): """ * `d_model` is the transformer embeddings size * `patch_size` is the size of the patch * `in_channels` is the number of channels in the input image (3 for rgb) """ super().__init__() self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride= patch_size) def forward(self, x: 'torch.Tensor'): """ * `x` is the input image of shape `[batch_size, channels, height, width]` """ x = self.conv(x) bs, c, h, w = x.shape x = x.permute(2, 3, 0, 1) x = x.view(h * w, bs, c) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'patch_size': 4, 'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (1, 4, 4), (1, 4, 1), 0 ), primals_1, primals_3 class PatchEmbeddingsNew(Module): """ <a id="PatchEmbeddings"></a> ## Get patch embeddings The paper splits the image into patches of equal size and do a linear transformation on the flattened pixels for each patch. 
We implement the same thing through a convolution layer, because it's simpler to implement. """ def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'): """ * `d_model` is the transformer embeddings size * `patch_size` is the size of the patch * `in_channels` is the number of channels in the input image (3 for rgb) """ super().__init__() self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride= patch_size) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mcx/annotated_deep_learning_paper_implementations
PatchEmbeddings
false
7,202
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
LearnedPositionalEmbeddings
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class LearnedPositionalEmbeddings(Module): """ <a id="LearnedPositionalEmbeddings"></a> ## Add parameterized positional encodings This adds learned positional embeddings to patch embeddings. """ def __init__(self, d_model: 'int', max_len: 'int'=5000): """ * `d_model` is the transformer embeddings size * `max_len` is the maximum number of patches """ super().__init__() self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True) def forward(self, x: 'torch.Tensor'): """ * `x` is the patch embeddings of shape `[patches, batch_size, d_model]` """ pe = self.positional_encodings[x.shape[0]] return x + pe def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LearnedPositionalEmbeddingsNew(Module): """ <a id="LearnedPositionalEmbeddings"></a> ## Add parameterized positional encodings This adds learned positional embeddings to patch embeddings. """ def __init__(self, d_model: 'int', max_len: 'int'=5000): """ * `d_model` is the transformer embeddings size * `max_len` is the maximum number of patches """ super().__init__() self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True) def forward(self, input_0): primals_1 = self.positional_encodings primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
mcx/annotated_deep_learning_paper_implementations
LearnedPositionalEmbeddings
false
7,203
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
LSTMCell
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class LSTMCell(Module): """ ## Long Short-Term Memory Cell LSTM Cell computes $c$, and $h$. $c$ is like the long-term memory, and $h$ is like the short term memory. We use the input $x$ and $h$ to update the long term memory. In the update, some features of $c$ are cleared with a forget gate $f$, and some features $i$ are added through a gate $g$. The new short term memory is the $ anh$ of the long-term memory multiplied by the output gate $o$. Note that the cell doesn't look at long term memory $c$ when doing the update. It only modifies it. Also $c$ never goes through a linear transformation. This is what solves vanishing and exploding gradients. Here's the update rule. egin{align} c_t &= \\sigma(f_t) \\odot c_{t-1} + \\sigma(i_t) \\odot anh(g_t) \\ h_t &= \\sigma(o_t) \\odot anh(c_t) \\end{align} $\\odot$ stands for element-wise multiplication. Intermediate values and gates are computed as linear transformations of the hidden state and input. 
egin{align} i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\ f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\ g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\ o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1}) \\end{align} """ def __init__(self, input_size: 'int', hidden_size: 'int', layer_norm: 'bool'=False): super().__init__() self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size) self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False) if layer_norm: self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(4)]) self.layer_norm_c = nn.LayerNorm(hidden_size) else: self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)]) self.layer_norm_c = nn.Identity() def forward(self, x: 'torch.Tensor', h: 'torch.Tensor', c: 'torch.Tensor'): ifgo = self.hidden_lin(h) + self.input_lin(x) ifgo = ifgo.chunk(4, dim=-1) ifgo = [self.layer_norm[i](ifgo[i]) for i in range(4)] i, f, g, o = ifgo c_next = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g) h_next = torch.sigmoid(o) * torch.tanh(self.layer_norm_c(c_next)) return h_next, c_next def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp18 = tl.load(in_ptr3 + x2, xmask) tmp25 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp26 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp19 = tmp17 * tmp18 tmp20 = tmp5 * tmp11 tmp21 = tmp19 + tmp20 tmp22 = 1.0 tmp23 = 
tmp22 - tmp17 tmp24 = tmp17 * tmp23 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = libdevice.tanh(tmp21) tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr2 + x2, tmp21, xmask) tl.store(out_ptr3 + x2, tmp24, xmask) tl.store(out_ptr4 + x2, tmp30, xmask) tl.store(out_ptr5 + x2, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(256)]( buf0, primals_2, buf1, primals_6, buf2, buf3, buf4, buf7, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 return buf6, buf4, primals_6, reinterpret_tensor(primals_3, (64, 4), 
(4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0 ), buf2, buf3, buf4, buf5, buf7 class LSTMCellNew(Module): """ ## Long Short-Term Memory Cell LSTM Cell computes $c$, and $h$. $c$ is like the long-term memory, and $h$ is like the short term memory. We use the input $x$ and $h$ to update the long term memory. In the update, some features of $c$ are cleared with a forget gate $f$, and some features $i$ are added through a gate $g$. The new short term memory is the $ anh$ of the long-term memory multiplied by the output gate $o$. Note that the cell doesn't look at long term memory $c$ when doing the update. It only modifies it. Also $c$ never goes through a linear transformation. This is what solves vanishing and exploding gradients. Here's the update rule. egin{align} c_t &= \\sigma(f_t) \\odot c_{t-1} + \\sigma(i_t) \\odot anh(g_t) \\ h_t &= \\sigma(o_t) \\odot anh(c_t) \\end{align} $\\odot$ stands for element-wise multiplication. Intermediate values and gates are computed as linear transformations of the hidden state and input. 
egin{align} i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\ f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\ g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\ o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1}) \\end{align} """ def __init__(self, input_size: 'int', hidden_size: 'int', layer_norm: 'bool'=False): super().__init__() self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size) self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False) if layer_norm: self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(4)]) self.layer_norm_c = nn.LayerNorm(hidden_size) else: self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)]) self.layer_norm_c = nn.Identity() def forward(self, input_0, input_1, input_2): primals_1 = self.hidden_lin.weight primals_2 = self.hidden_lin.bias primals_4 = self.input_lin.weight primals_3 = input_0 primals_5 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
mcx/annotated_deep_learning_paper_implementations
LSTMCell
false
7,204
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
SquaredReLU
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SquaredReLU(Module): """ ## Squared ReLU activation $$y = {\\max(x, 0)}^2$$ Squared ReLU is used as the activation function in the [position wise feedforward module](../feed_forward.html). """ def __init__(self): super().__init__() self.relu = nn.ReLU() def forward(self, x: 'torch.Tensor'): x = self.relu(x) return x * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SquaredReLUNew(Module): """ ## Squared ReLU activation $$y = {\\max(x, 0)}^2$$ Squared ReLU is used as the activation function in the [position wise feedforward module](../feed_forward.html). """ def __init__(self): super().__init__() self.relu = nn.ReLU() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
SquaredReLU
false
7,205
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
MarginLoss
from torch.nn import Module import torch import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class MarginLoss(Module): '\n ## Margin loss for class existence\n\n A separate margin loss is used for each output capsule and the total loss is the sum of them.\n The length of each output capsule is the probability that class is present in the input.\n\n Loss for each output capsule or class $k$ is,\n $$\\mathcal{L}_k = T_k \\max(0, m^{+} - \\lVert\\mathbf{v}_k\rVert)^2 +\n \\lambda (1 - T_k) \\max(0, \\lVert\\mathbf{v}_k\rVert - m^{-})^2$$\n\n $T_k$ is $1$ if the class $k$ is present and $0$ otherwise.\n The first component of the loss is $0$ when the class is not present,\n and the second component is $0$ if the class is present.\n The $\\max(0, x)$ is used to avoid predictions going to extremes.\n $m^{+}$ is set to be $0.9$ and $m^{-}$ to be $0.1$ in the paper.\n\n The $\\lambda$ down-weighting is used to stop the length of all capsules from\n falling during the initial phase of training.\n ' def __init__(self, *, n_labels: int, lambda_: float=0.5, m_positive: float=0.9, m_negative: float=0.1): super().__init__() self.m_negative = m_negative self.m_positive = m_positive self.lambda_ = lambda_ self.n_labels = n_labels def forward(self, v: 'torch.Tensor', labels: 'torch.Tensor'): """ `v`, $\\mathbf{v}_j$ are the squashed output capsules. This has shape `[batch_size, n_labels, n_features]`; that is, there is a capsule for each label. `labels` are the labels, and has shape `[batch_size]`. """ v_norm = torch.sqrt((v ** 2).sum(dim=-1)) labels = torch.eye(self.n_labels, device=labels.device)[labels] loss = labels * F.relu(self.m_positive - v_norm) + self.lambda_ * ( 1.0 - labels) * F.relu(v_norm - self.m_negative) return loss.sum(dim=-1).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'n_labels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tmp4 tmp7 = x0 tmp8 = tmp6 == tmp7 tmp9 = 1.0 tmp10 = 0.0 tmp11 = tl.where(tmp8, tmp9, tmp10) tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 0.9 tmp25 = tmp24 - tmp23 tmp26 = tl.full([1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = tmp11 * tmp27 tmp29 = tmp9 - tmp11 tmp30 = 0.5 tmp31 = tmp29 * tmp30 tmp32 = 0.1 tmp33 = tmp23 - tmp32 tmp34 = triton_helpers.maximum(tmp26, tmp33) tmp35 = tmp31 * tmp34 tmp36 = tmp28 + tmp35 
tl.store(out_ptr0 + x3, tmp36, xmask) @triton.jit def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 16.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0[grid (64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_mean_sum_1[grid(1)](buf2, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf2, class MarginLossNew(Module): '\n ## Margin loss for class existence\n\n A separate margin loss is used for each output capsule and the total loss is the sum of them.\n The length of each output capsule is the probability that class is present in the input.\n\n Loss for each output capsule or class $k$ is,\n $$\\mathcal{L}_k = T_k \\max(0, m^{+} - \\lVert\\mathbf{v}_k\rVert)^2 +\n \\lambda (1 - T_k) \\max(0, 
\\lVert\\mathbf{v}_k\rVert - m^{-})^2$$\n\n $T_k$ is $1$ if the class $k$ is present and $0$ otherwise.\n The first component of the loss is $0$ when the class is not present,\n and the second component is $0$ if the class is present.\n The $\\max(0, x)$ is used to avoid predictions going to extremes.\n $m^{+}$ is set to be $0.9$ and $m^{-}$ to be $0.1$ in the paper.\n\n The $\\lambda$ down-weighting is used to stop the length of all capsules from\n falling during the initial phase of training.\n ' def __init__(self, *, n_labels: int, lambda_: float=0.5, m_positive: float=0.9, m_negative: float=0.1): super().__init__() self.m_negative = m_negative self.m_positive = m_positive self.lambda_ = lambda_ self.n_labels = n_labels def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
MarginLoss
false
7,206
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
Squash
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class Squash(Module): '\n ## Squash\n\n This is **squashing** function from paper, given by equation $(1)$.\n\n $$\\mathbf{v}_j = \x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}\n \x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$$\n\n $\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$\n normalizes the length of all the capsules, whilst\n $\x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}$\n shrinks the capsules that have a length smaller than one .\n ' def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, s: 'torch.Tensor'): """ The shape of `s` is `[batch_size, n_capsules, n_features]` """ s2 = (s ** 2).sum(dim=-1, keepdims=True) return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SquashNew(Module): '\n ## Squash\n\n This is **squashing** function from paper, given by equation $(1)$.\n\n $$\\mathbf{v}_j = \x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}\n 
\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$$\n\n $\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$\n normalizes the length of all the capsules, whilst\n $\x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}$\n shrinks the capsules that have a length smaller than one .\n ' def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
Squash
false
7,207
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
FeedForward
import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class FeedForward(nn.Module): """ ### Position-wise Feed Forward Layer $ ext{F\\small{FW}}$ This consists of two linear layers and an activation in the middle. """ def __init__(self, d_model: 'int', d_ff: 'int'): """ * `d_model` is the number of features in transformer embeddings * `d_ff` is the number features in the hidden layer """ super().__init__() self.lin1 = nn.Linear(d_model, d_ff) self.lin2 = nn.Linear(d_ff, d_model) self.act = nn.ReLU() self.norm = nn.LayerNorm(d_model) def forward(self, h: 'torch.Tensor'): """ `h` are the embeddings of shape `[batch_size, seq_len, d_model]` """ h_res = h h = self.norm(h) h = self.lin1(h) h = self.act(h) h = self.lin2(h) return h + h_res def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 
= tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = 
empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4, primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (64, 4), (4, 1), 0 ), primals_6, buf7, primals_4 class FeedForwardNew(nn.Module): """ ### Position-wise Feed Forward Layer $ ext{F\\small{FW}}$ This consists of two linear layers and an activation in the middle. 
""" def __init__(self, d_model: 'int', d_ff: 'int'): """ * `d_model` is the number of features in transformer embeddings * `d_ff` is the number features in the hidden layer """ super().__init__() self.lin1 = nn.Linear(d_model, d_ff) self.lin2 = nn.Linear(d_ff, d_model) self.act = nn.ReLU() self.norm = nn.LayerNorm(d_model) def forward(self, input_0): primals_4 = self.lin1.weight primals_2 = self.lin1.bias primals_6 = self.lin2.weight primals_3 = self.lin2.bias primals_5 = self.norm.weight primals_7 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mcx/annotated_deep_learning_paper_implementations
FeedForward
false
7,208
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
BehaviorClone
import torch import torch.nn as nn import torch.nn.functional as F class BehaviorClone(nn.Module): def __init__(self, input_shape, output_shape): super(BehaviorClone, self).__init__() self.input_shape = input_shape self.output_shape = output_shape self.fc1 = nn.Linear(input_shape, input_shape // 2) self.fc2 = nn.Linear(input_shape // 2, output_shape) self.do = nn.Dropout(p=0.3) def forward(self, x): x = F.relu(self.fc1(x)) x = self.do(x) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': 4, 'output_shape': 4}]
# TorchInductor output for ``BehaviorClone`` (fc1 -> ReLU -> dropout -> fc2).
# Both matmuls are dispatched to cuBLAS via ``extern_kernels``; only the
# fused bias-add + ReLU runs as a custom Triton kernel.  No dropout op
# appears in this traced graph — presumably an inference/eval trace; confirm.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
# Inductor runtime helpers: shape/stride guards and zero-copy strided views.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused: hidden = relu(hidden + bias), plus the "<= 0" mask that the
    # autograd backward of ReLU (threshold_backward) consumes.
    #   in_out_ptr0: (4,4,4,2) pre-activation, updated in place
    #   in_ptr0:     (2,) fc1 bias, broadcast via x0 = index % 2
    #   out_ptr0:    (4,4,4,2) bool mask of clamped activations
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # True where the activation was zeroed
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    """Run the compiled forward graph.

    args = [fc1.weight (2,4), fc1.bias (2,), x (4,4,4,4),
            fc2.weight (4,2), fc2.bias (4,)]
    Returns the output plus tensors saved for the backward pass.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (2, 4), (4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 2), (2, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # x flattened to (64,4) @ fc1.weight^T -> hidden pre-activation.
        buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
        get_raw_stream(0)
        # In-place bias + ReLU; buf3 receives the backward mask.
        triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1,
            primals_2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # hidden @ fc2.weight^T + fc2.bias, fused into one addmm.
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (
            2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4, buf3


class BehaviorCloneNew(nn.Module):
    """Drop-in replacement for ``BehaviorClone`` that runs the compiled graph."""

    def __init__(self, input_shape, output_shape):
        super(BehaviorCloneNew, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.fc1 = nn.Linear(input_shape, input_shape // 2)
        self.fc2 = nn.Linear(input_shape // 2, output_shape)
        self.do = nn.Dropout(p=0.3)

    def forward(self, input_0):
        # Feed the parameters and input to the compiled graph in the
        # positional order `call` expects.
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
mdiephuis/Berkeley-cs294-112
BehaviorClone
false
7,209
[ "MIT" ]
1
99559e046b635ca8d229f19ca4ad45c2c02a1c01
https://github.com/mdiephuis/Berkeley-cs294-112/tree/99559e046b635ca8d229f19ca4ad45c2c02a1c01
SpatialDepthWiseConvolution
from torch.nn import Module
import math
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class SpatialDepthWiseConvolution(Module):
    """Causal depth-wise convolution along the sequence axis.

    Implemented as an explicit shift-multiply-accumulate over the kernel
    taps rather than ``nn.Conv1d`` (the upstream note says this variant is
    actually slower).
    """

    def __init__(self, d_k: 'int', kernel_size: 'int'=3):
        """
        * `d_k` is the number of channels in each head
        """
        super().__init__()
        self.kernel_size = kernel_size
        # Uniform init in [-1/sqrt(k), 1/sqrt(k)], one tap row per offset.
        bound = 1 / math.sqrt(kernel_size)
        self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)).
            uniform_(-bound, bound))

    def forward(self, x: 'torch.Tensor'):
        """`x` has shape `[seq_len, batch_size, heads, d_k]`"""
        # Tap 0 weights the current position...
        res = x * self.kernels[0].view(1, 1, 1, -1)
        # ...and tap `shift` weights the position `shift` steps back.
        for shift in range(1, self.kernels.shape[0]):
            res[shift:] += x[:-shift] * self.kernels[shift].view(1, 1, 1, -1)
        return res


def get_inputs():
    """Sample forward-pass inputs used by the benchmarking harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmarking harness."""
    return [[], {'d_k': 4}]
# TorchInductor output for ``SpatialDepthWiseConvolution`` specialised to the
# benchmark shapes: seq_len = batch = heads = d_k = 4 with kernel_size = 3.
# The whole shift-multiply-accumulate loop is flattened into a single fully
# unrolled Triton kernel.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
# Inductor runtime helpers: shape/stride guard and CUDA buffer allocator.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    # One lane per element of the (4,4,4,4) output (xnumel = 256).
    #   in_ptr0: input x, in_ptr1: (3,4) kernel taps,
    #   x2 = sequence position (index // 64), x0 = d_k channel (index % 4).
    # Predicates tmp4 (x2 >= 1) and tmp2 (x2 >= 2) gate the contributions
    # that the original loop wrote as `res[i:] += x[:-i] * kernels[i]`.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 64
    x4 = xindex
    x0 = xindex % 4
    tmp61 = tl.load(in_ptr0 + x4, xmask)
    tmp62 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x2
    tmp1 = tl.full([1], 2, tl.int64)
    tmp2 = tmp0 >= tmp1  # sequence position >= 2: all three taps apply
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 >= tmp3  # sequence position >= 1: taps 0 and 1 apply
    tmp5 = tmp4 & tmp2
    tmp6 = tmp4 & tmp5
    # tap 0 * x[t] + tap 1 * x[t-1] (offsets -64 step back one seq position).
    tmp7 = tl.load(in_ptr0 + x4, tmp6 & xmask, other=0.0)
    tmp8 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp9 = tmp7 * tmp8
    tmp10 = tl.load(in_ptr0 + (-64 + x4), tmp6 & xmask, other=0.0)
    tmp11 = tl.load(in_ptr1 + (4 + x0), tmp6 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp12 = tmp10 * tmp11
    tmp13 = tmp9 + tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp6, tmp13, tmp14)
    tmp16 = tl.load(in_ptr0 + x4, tmp5 & xmask, other=0.0)
    tmp17 = tl.load(in_ptr1 + x0, tmp5 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp18 = tmp16 * tmp17
    tmp19 = tl.where(tmp4, tmp15, tmp18)
    tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
    tmp21 = tl.where(tmp5, tmp19, tmp20)
    tmp22 = tl.load(in_ptr0 + (-64 + x4), tmp5 & xmask, other=0.0)
    tmp23 = tl.load(in_ptr1 + (4 + x0), tmp5 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp24 = tmp22 * tmp23
    tmp25 = tmp18 + tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp5, tmp25, tmp26)
    tmp28 = tl.load(in_ptr0 + x4, tmp2 & xmask, other=0.0)
    tmp29 = tl.load(in_ptr1 + x0, tmp2 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 * tmp29
    tmp31 = tl.where(tmp4, tmp27, tmp30)
    tmp32 = tl.where(tmp4, tmp21, tmp31)
    # tap 2 * x[t-2] (offset -128, kernel row 2 at in_ptr1 + 8 + x0).
    tmp33 = tl.load(in_ptr0 + (-128 + x4), tmp2 & xmask, other=0.0)
    tmp34 = tl.load(in_ptr1 + (8 + x0), tmp2 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp35 = tmp33 * tmp34
    tmp36 = tmp32 + tmp35
    tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
    tmp38 = tl.where(tmp2, tmp36, tmp37)
    tmp39 = tmp4 & tmp4
    tmp40 = tl.load(in_ptr0 + x4, tmp39 & xmask, other=0.0)
    tmp41 = tl.load(in_ptr1 + x0, tmp39 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp42 = tmp40 * tmp41
    tmp43 = tl.load(in_ptr0 + (-64 + x4), tmp39 & xmask, other=0.0)
    tmp44 = tl.load(in_ptr1 + (4 + x0), tmp39 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp45 = tmp43 * tmp44
    tmp46 = tmp42 + tmp45
    tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
    tmp48 = tl.where(tmp39, tmp46, tmp47)
    tmp49 = tl.load(in_ptr0 + x4, tmp4 & xmask, other=0.0)
    tmp50 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp51 = tmp49 * tmp50
    tmp52 = tl.where(tmp4, tmp48, tmp51)
    tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
    tmp54 = tl.where(tmp4, tmp52, tmp53)
    tmp55 = tl.load(in_ptr0 + (-64 + x4), tmp4 & xmask, other=0.0)
    tmp56 = tl.load(in_ptr1 + (4 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp57 = tmp55 * tmp56
    tmp58 = tmp51 + tmp57
    tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
    tmp60 = tl.where(tmp4, tmp58, tmp59)
    tmp63 = tmp61 * tmp62  # tap 0 only, for sequence position 0
    # Select the accumulated value for this position's predicate band.
    tmp64 = tl.where(tmp4, tmp60, tmp63)
    tmp65 = tl.where(tmp4, tmp54, tmp64)
    tmp66 = tl.where(tmp2, tmp38, tmp65)
    tmp67 = tl.where(tmp2, tmp66, tmp66)
    tl.store(in_out_ptr0 + x4, tmp67, xmask)


def call(args):
    """Compiled forward graph.

    args = [kernels (3,4), x (4,4,4,4)]; returns (result, x).
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (3, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](buf1, primals_2, primals_1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
    return buf1, primals_2


class SpatialDepthWiseConvolutionNew(Module):
    """
    ## Spatial Depth Wise Convolution

    This is actually slower
    """

    def __init__(self, d_k: 'int', kernel_size: 'int'=3):
        """
        * `d_k` is the number of channels in each head
        """
        super().__init__()
        self.kernel_size = kernel_size
        rng = 1 / math.sqrt(kernel_size)
        self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)).
            uniform_(-rng, rng))

    def forward(self, input_0):
        # Route the parameter and input through the compiled graph.
        primals_1 = self.kernels
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
mcx/annotated_deep_learning_paper_implementations
SpatialDepthWiseConvolution
false
7,210
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
KLDivergenceLoss
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd


class KLDivergenceLoss(Module):
    """KL-divergence regularizer for evidential deep learning.

    Computes KL[ D(p | alpha_tilde) || D(p | <1, ..., 1>) ] where
    alpha_tilde_k = y_k + (1 - y_k) * alpha_k are the Dirichlet parameters
    after the correct-class evidence is removed, so misleading evidence is
    shrunk towards zero.
    """

    def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'):
        """
        * `evidence` is e >= 0 with shape `[batch_size, n_classes]`
        * `target` is y with shape `[batch_size, n_classes]`
        """
        n_classes = evidence.shape[-1]
        # Dirichlet parameters with the correct evidence removed.
        alpha = evidence + 1.0
        alpha_tilde = target + (1 - target) * alpha
        strength_tilde = alpha_tilde.sum(dim=-1)
        # log Gamma(S~) - log Gamma(K) - sum_k log Gamma(alpha~_k)
        log_norm = (torch.lgamma(strength_tilde) -
                    torch.lgamma(alpha_tilde.new_tensor(float(n_classes))) -
                    torch.lgamma(alpha_tilde).sum(dim=-1))
        # sum_k (alpha~_k - 1) * (psi(alpha~_k) - psi(S~))
        digamma_gap = (torch.digamma(alpha_tilde) -
                       torch.digamma(strength_tilde)[:, None])
        expectation = ((alpha_tilde - 1) * digamma_gap).sum(dim=-1)
        return (log_norm + expectation).mean()


def get_inputs():
    """Sample forward-pass inputs used by the benchmarking harness."""
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmarking harness."""
    return [[], {}]
# TorchInductor output for ``KLDivergenceLoss`` specialised to (4,4,4,4)
# inputs.  ``digamma`` falls back to the ATen op; the remaining math is
# fused into three Triton kernels.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
# Inductor runtime helpers: shape/stride guard and CUDA buffer allocator.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise alpha~ = y + (1 - y) * (e + 1).
    #   in_ptr0 = target y, in_ptr1 = evidence e (see the launch in `call`).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 * tmp4
    tmp6 = tmp0 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Row sums over the last (size-4) axis: S~ = sum_k alpha~_k.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Final reduction.  Recomputes alpha~ inline from target (in_ptr0) and
    # evidence (in_ptr1), then
    #   first  = lgamma(S~) - lgamma(4) - sum_k lgamma(alpha~_k)
    #   second = sum_k (alpha~_k - 1) * (psi(alpha~_k) - psi(S~))
    # using the precomputed digammas in_ptr2 = psi(alpha~), in_ptr3 = psi(S~)
    # (in_ptr3's 4*r1 + 16*r3 index reproduces the `[:, None]` broadcast),
    # sums the 64 rows and divides by 64 (the final .mean()).
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    r1 = rindex % 4
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr3 + (4 * r1 + 16 * r3), None, eviction_policy=
        'evict_last')
    tmp44 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr3 + (1 + 4 * r1 + 16 * r3), None, eviction_policy
        ='evict_last')
    tmp50 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp51 = tl.load(in_ptr3 + (2 + 4 * r1 + 16 * r3), None, eviction_policy
        ='evict_last')
    tmp56 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr3 + (3 + 4 * r1 + 16 * r3), None, eviction_policy
        ='evict_last')
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 * tmp4
    tmp6 = tmp0 + tmp5
    tmp8 = tmp1 - tmp7
    tmp10 = tmp9 + tmp1
    tmp11 = tmp8 * tmp10
    tmp12 = tmp7 + tmp11
    tmp13 = tmp6 + tmp12
    tmp15 = tmp1 - tmp14
    tmp17 = tmp16 + tmp1
    tmp18 = tmp15 * tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = tmp13 + tmp19
    tmp22 = tmp1 - tmp21
    tmp24 = tmp23 + tmp1
    tmp25 = tmp22 * tmp24
    tmp26 = tmp21 + tmp25
    tmp27 = tmp20 + tmp26
    tmp28 = libdevice.lgamma(tmp27)
    tmp29 = 1.7917594909667969  # lgamma(4.0) = ln(3!) for n_classes = 4
    tmp30 = tmp28 - tmp29
    tmp31 = libdevice.lgamma(tmp6)
    tmp32 = libdevice.lgamma(tmp12)
    tmp33 = tmp31 + tmp32
    tmp34 = libdevice.lgamma(tmp19)
    tmp35 = tmp33 + tmp34
    tmp36 = libdevice.lgamma(tmp26)
    tmp37 = tmp35 + tmp36
    tmp38 = tmp6 - tmp1
    tmp41 = tmp39 - tmp40
    tmp42 = tmp38 * tmp41
    tmp43 = tmp12 - tmp1
    tmp46 = tmp44 - tmp45
    tmp47 = tmp43 * tmp46
    tmp48 = tmp42 + tmp47
    tmp49 = tmp19 - tmp1
    tmp52 = tmp50 - tmp51
    tmp53 = tmp49 * tmp52
    tmp54 = tmp48 + tmp53
    tmp55 = tmp26 - tmp1
    tmp58 = tmp56 - tmp57
    tmp59 = tmp55 * tmp58
    tmp60 = tmp54 + tmp59
    tmp61 = tmp30 - tmp37
    tmp62 = tmp61 + tmp60
    tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK])
    tmp65 = tl.sum(tmp63, 1)[:, None]
    tmp66 = 64.0
    tmp67 = tmp65 / tmp66
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp67, None)


def call(args):
    """Compiled forward graph: (evidence, target), both (4,4,4,4) -> scalar."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf2 = alpha~ elementwise.
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_0[grid(256)](arg1_1, arg0_1, buf2,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        # psi(alpha~) via the ATen fallback.
        buf3 = torch.ops.aten.digamma.default(buf2)
        buf4 = buf3
        del buf3
        # S~ = row sums, then psi(S~).
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_sum_1[grid(64)](buf2, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf2
        buf6 = torch.ops.aten.digamma.default(buf5)
        del buf5
        buf7 = buf6
        del buf6
        buf9 = empty_strided_cuda((), (), torch.float32)
        buf10 = buf9
        del buf9
        triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2[grid(1)](buf10,
            arg1_1, arg0_1, buf4, buf7, 1, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
        del buf4
        del buf7
    return buf10,


class KLDivergenceLossNew(Module):
    """
    <a id="KLDivergenceLoss"></a>

    ## KL Divergence Regularization Loss

    This tries to shrink the total evidence to zero if the sample cannot
    be correctly classified.

    It computes KL[ D(p | alpha_tilde) || D(p | <1, ..., 1>) ] where
    alpha_tilde_k = y_k + (1 - y_k) * alpha_k are the Dirichlet parameters
    after the correct evidence has been removed.
    """

    def forward(self, input_0, input_1):
        # input_0 = evidence, input_1 = target; compiled graph returns a
        # single scalar loss.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
mcx/annotated_deep_learning_paper_implementations
KLDivergenceLoss
false
7,211
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
SpatialDepthWisePerHeadConvolution
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class SpatialDepthWisePerHeadConvolution(Module):
    """Causal depth-wise 1D convolution along the sequence axis with a
    separate kernel for every (head, channel) pair."""

    def __init__(self, heads: 'int', d_k: 'int', kernel_size: 'int'=3):
        """
        * `heads` is the number of heads
        * `d_k` is the number of channels in each head
        """
        super().__init__()
        self.kernel_size = kernel_size
        # groups == channels makes the convolution depth-wise; the extra
        # left/right padding is trimmed below to keep it causal.
        self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k *
            heads, kernel_size=(kernel_size,), padding=(kernel_size - 1,),
            groups=d_k * heads)

    def forward(self, x: 'torch.Tensor'):
        """`x` has shape `[seq_len, batch_size, heads, d_k]`"""
        seq_len, batch_size, heads, d_k = x.shape
        # Fold (heads, d_k) into the channel axis, sequence last.
        folded = x.permute(1, 2, 3, 0).view(batch_size, heads * d_k, seq_len)
        convolved = self.conv(folded)
        # Drop the trailing padded outputs so the convolution stays causal.
        trimmed = convolved[:, :, :-(self.kernel_size - 1)]
        # Restore the `[seq_len, batch_size, heads, d_k]` layout.
        return trimmed.view(batch_size, heads, d_k, seq_len).permute(3, 0, 1, 2)


def get_inputs():
    """Sample forward-pass inputs used by the benchmarking harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmarking harness."""
    return [[], {'heads': 4, 'd_k': 4}]
# TorchInductor output for ``SpatialDepthWisePerHeadConvolution`` specialised
# to seq_len = batch = heads = d_k = 4.  The permute/view is materialised by
# one Triton kernel, the grouped conv goes through ATen, the bias-add is a
# second Triton kernel, and the causal trim + final permute are expressed as
# a zero-copy strided view of the conv output.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
# Inductor runtime helpers: shape/stride guards and zero-copy strided views.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Materialise x.permute(1, 2, 3, 0).view(4, 16, 4):
    #   y0 indexes the flattened (batch, head, d_k) triple (64 lanes),
    #   x1 the sequence position (stride 64 in the contiguous input).
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add over the (4, 16, 6) conv output;
    # x1 = channel index into the (16,) bias.
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 6 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    """Compiled forward graph.

    args = [x (4,4,4,4), conv.weight (16,1,3), conv.bias (16,)].
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (16, 1, 3), (3, 3, 1))
    assert_size_stride(primals_3, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64, 4)](primals_1, buf0, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        # Depth-wise grouped conv (groups=16), padding 2 -> length 6.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(2,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=16, bias=None)
        assert_size_stride(buf1, (4, 16, 6), (96, 6, 1))
        del buf0
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(384)](buf2, primals_3, 384,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    # Viewing (4,16,6) as (4,4,4,4) with strides (1,96,24,6) keeps only the
    # first 4 of 6 time steps (the causal trim) and fuses the final permute.
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0
        ), primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (16, 1, 
        64), 0)


class SpatialDepthWisePerHeadConvolutionNew(Module):
    """
    ## Spatial Depth Wise Per Head Convolution
    """

    def __init__(self, heads: 'int', d_k: 'int', kernel_size: 'int'=3):
        """
        * `heads` is the number of heads
        * `d_k` is the number of channels in each head
        """
        super().__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k *
            heads, kernel_size=(kernel_size,), padding=(kernel_size - 1,),
            groups=d_k * heads)

    def forward(self, input_0):
        # Route parameters and input through the compiled graph.
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
mcx/annotated_deep_learning_paper_implementations
SpatialDepthWisePerHeadConvolution
false
7,212
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
SpatialDepthWiseSharedConvolution
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class SpatialDepthWiseSharedConvolution(Module):
    """Causal depth-wise 1D convolution along the sequence axis where every
    channel of every head shares the same single kernel."""

    def __init__(self, kernel_size: 'int'=3):
        """
        * `kernel_size` is the length of the shared kernel
        """
        super().__init__()
        self.kernel_size = kernel_size
        # One 1-channel kernel; the extra padding is trimmed in forward to
        # keep the convolution causal.
        self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=(
            kernel_size,), padding=(kernel_size - 1,))

    def forward(self, x: 'torch.Tensor'):
        """`x` has shape `[seq_len, batch_size, heads, d_k]`"""
        seq_len, batch_size, heads, d_k = x.shape
        # Treat every (batch, head, channel) series as its own 1-channel signal.
        lanes = x.permute(1, 2, 3, 0).view(batch_size * heads * d_k, 1, seq_len)
        convolved = self.conv(lanes)
        # Drop the trailing padded outputs so the convolution stays causal.
        trimmed = convolved[:, :, :-(self.kernel_size - 1)]
        # Restore the `[seq_len, batch_size, heads, d_k]` layout.
        return trimmed.view(batch_size, heads, d_k, seq_len).permute(3, 0, 1, 2)


def get_inputs():
    """Sample forward-pass inputs used by the benchmarking harness."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmarking harness."""
    return [[], {}]
# TorchInductor output for ``SpatialDepthWiseSharedConvolution`` specialised
# to seq_len = batch = heads = d_k = 4.  Same structure as the per-head
# variant: Triton kernel for the permute/view, ATen conv (single shared
# 1-channel kernel, groups=1), Triton kernel for the scalar bias, and a
# zero-copy strided view for the causal trim + final permute.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
# Inductor runtime helpers: shape/stride guards and zero-copy strided views.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Materialise x.permute(1, 2, 3, 0).view(64, 1, 4):
    #   y0 indexes the flattened (batch, head, d_k) triple (64 lanes),
    #   x1 the sequence position (stride 64 in the contiguous input).
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place add of the single scalar bias (in_ptr0 has one element,
    # broadcast over all 384 conv outputs).
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


def call(args):
    """Compiled forward graph.

    args = [x (4,4,4,4), conv.weight (1,1,3), conv.bias (1,)].
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64, 4)](primals_1, buf0, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        # Single shared kernel applied to all 64 lanes; padding 2 -> length 6.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(2,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf1, (64, 1, 6), (6, 6, 1))
        del buf0
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(384)](buf2, primals_3, 384,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    # Viewing (64,1,6) as (4,4,4,4) with strides (1,96,24,6) keeps only the
    # first 4 of 6 time steps (the causal trim) and fuses the final permute.
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0
        ), primals_2, reinterpret_tensor(primals_1, (64, 1, 4), (1, 256, 
        64), 0)


class SpatialDepthWiseSharedConvolutionNew(Module):
    """
    ## Spatial Depth Wise Shared Convolution

    We share the same kernel across all channels.
    """

    def __init__(self, kernel_size: 'int'=3):
        """
        """
        super().__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=(
            kernel_size,), padding=(kernel_size - 1,))

    def forward(self, input_0):
        # Route parameters and input through the compiled graph.
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
mcx/annotated_deep_learning_paper_implementations
SpatialDepthWiseSharedConvolution
false
7,213
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
SquaredErrorBayesRisk
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd


class SquaredErrorBayesRisk(Module):
    """Bayes risk of the squared-error loss under a Dirichlet posterior.

    With alpha = evidence + 1 and S = sum_k alpha_k, the expected squared
    error E||y - p||^2 over p ~ Dir(alpha) decomposes into

        sum_k (y_k - p_hat_k)^2 + p_hat_k * (1 - p_hat_k) / (S + 1)

    where p_hat_k = alpha_k / S is the expected probability — an error term
    plus a variance term.
    """

    def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'):
        """
        * `evidence` is e >= 0 with shape `[batch_size, n_classes]`
        * `target` is y with shape `[batch_size, n_classes]`
        """
        alpha = evidence + 1.0
        # Dirichlet strength, broadcast back over the class axis.
        strength = alpha.sum(dim=-1)[:, None]
        p_hat = alpha / strength
        squared_err = (target - p_hat) ** 2
        variance = p_hat * (1 - p_hat) / (strength + 1)
        return (squared_err + variance).sum(dim=-1).mean()


def get_inputs():
    """Sample forward-pass inputs used by the benchmarking harness."""
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) used by the benchmarking harness."""
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (4 * x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy ='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy ='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = tmp4 + tmp6 tmp9 = tmp8 + tmp1 tmp10 = tmp7 + tmp9 tmp12 = tmp11 + tmp1 tmp13 = tmp10 + tmp12 tmp14 = tmp2 / tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r3, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (16 * r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (1 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') 
tmp12 = tl.load(in_ptr2 + (2 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (3 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (5 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (6 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (7 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (8 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (9 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr2 + (10 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (11 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp63 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr1 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr2 + (12 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr2 + (13 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (14 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp77 = tl.load(in_ptr2 + (15 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp6 = tmp1 * tmp5 tmp8 = tmp7 + tmp4 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp16 = tmp15 + tmp4 tmp17 = tmp14 + tmp16 tmp18 = tmp17 + tmp4 tmp19 = tmp6 / tmp18 tmp20 = tmp3 + tmp19 tmp23 = tmp21 - tmp22 
tmp24 = tmp23 * tmp23 tmp25 = tmp4 - tmp22 tmp26 = tmp22 * tmp25 tmp28 = tmp27 + tmp4 tmp30 = tmp29 + tmp4 tmp31 = tmp28 + tmp30 tmp33 = tmp32 + tmp4 tmp34 = tmp31 + tmp33 tmp36 = tmp35 + tmp4 tmp37 = tmp34 + tmp36 tmp38 = tmp37 + tmp4 tmp39 = tmp26 / tmp38 tmp40 = tmp24 + tmp39 tmp41 = tmp20 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tmp44 * tmp44 tmp46 = tmp4 - tmp43 tmp47 = tmp43 * tmp46 tmp49 = tmp48 + tmp4 tmp51 = tmp50 + tmp4 tmp52 = tmp49 + tmp51 tmp54 = tmp53 + tmp4 tmp55 = tmp52 + tmp54 tmp57 = tmp56 + tmp4 tmp58 = tmp55 + tmp57 tmp59 = tmp58 + tmp4 tmp60 = tmp47 / tmp59 tmp61 = tmp45 + tmp60 tmp62 = tmp41 + tmp61 tmp65 = tmp63 - tmp64 tmp66 = tmp65 * tmp65 tmp67 = tmp4 - tmp64 tmp68 = tmp64 * tmp67 tmp70 = tmp69 + tmp4 tmp72 = tmp71 + tmp4 tmp73 = tmp70 + tmp72 tmp75 = tmp74 + tmp4 tmp76 = tmp73 + tmp75 tmp78 = tmp77 + tmp4 tmp79 = tmp76 + tmp78 tmp80 = tmp79 + tmp4 tmp81 = tmp68 / tmp80 tmp82 = tmp66 + tmp81 tmp83 = tmp62 + tmp82 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = tl.sum(tmp84, 1)[:, None] tmp87 = 64.0 tmp88 = tmp86 / tmp87 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp88, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1[grid(1)](buf3, arg1_1, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf0 return buf3, class SquaredErrorBayesRiskNew(Module): """ <a id="SquaredErrorBayesRisk"></a> ## Bayes Risk with Squared Error Loss Here the cost function is squared error, $$\\sum_{k=1}^K (y_k - 
p_k)^2 = \\Vert \\mathbf{y} - \\mathbf{p} \\Vert_2^2$$ We integrate this cost over all $\\mathbf{p}$ egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K (y_k - p_k)^2 \\Big] rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K \\mathbb{E} \\Big[ y_k^2 -2 y_k p_k + p_k^2 \\Big] \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\end{align} Where $$\\mathbb{E}[p_k] = \\hat{p}_k = rac{ extcolor{orange}{lpha_k}}{S}$$ is the expected probability when sampled from the Dirichlet distribution and $$\\mathbb{E}[p_k^2] = \\mathbb{E}[p_k]^2 + ext{Var}(p_k)$$ where $$ ext{Var}(p_k) = rac{ extcolor{orange}{lpha_k}(S - extcolor{orange}{lpha_k})}{S^2 (S + 1)} = rac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1}$$ is the variance. This gives, egin{align} \\mathcal{L}(\\Theta) &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k]^2 + ext{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ig( y_k -\\mathbb{E}[p_k] ig)^2 + ext{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ( y_k -\\hat{p}_k)^2 + rac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1} \\Big) \\end{align} This first part of the equation $ig(y_k -\\mathbb{E}[p_k]ig)^2$ is the error term and the second part is the variance. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mcx/annotated_deep_learning_paper_implementations
SquaredErrorBayesRisk
false
7,214
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
MNIST_Discriminator
import torch import torch.nn as nn from torch.nn import functional as F class MNIST_Discriminator(nn.Module): def __init__(self, latent_size): super(MNIST_Discriminator, self).__init__() self.latent_size = latent_size self.linear1 = nn.Linear(self.latent_size, self.latent_size // 2) self.linear2 = nn.Linear(self.latent_size // 2, 1) def forward(self, x): x = F.leaky_relu(self.linear1(x), 0.2) x = torch.sigmoid(self.linear2(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 2), (2, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(128)](buf0, primals_2, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 1), (1, 2), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_sigmoid_1[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), buf4, primals_4 class MNIST_DiscriminatorNew(nn.Module): def __init__(self, latent_size): super(MNIST_DiscriminatorNew, self).__init__() self.latent_size = latent_size self.linear1 = nn.Linear(self.latent_size, self.latent_size // 2) self.linear2 = nn.Linear(self.latent_size // 2, 1) def forward(self, 
input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mdiephuis/adversarial-autoencoders
MNIST_Discriminator
false
7,215
[ "MIT" ]
1
a722239564362796774de21a64fd92e81dce4089
https://github.com/mdiephuis/adversarial-autoencoders/tree/a722239564362796774de21a64fd92e81dce4089
MNIST_Encoder
import torch import torch.nn as nn from torch.nn import functional as F class MNIST_Encoder(nn.Module): def __init__(self, in_channels, latent_size): super(MNIST_Encoder, self).__init__() self.in_channels = in_channels self.latent_size = latent_size self.linear1 = nn.Linear(self.in_channels, self.latent_size) self.linear2 = nn.Linear(self.latent_size, self.latent_size) def forward(self, x): x = F.leaky_relu(self.linear1(x), 0.2) x = torch.tanh(self.linear2(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 
1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_tanh_1[grid(256)](buf4, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_4 class MNIST_EncoderNew(nn.Module): def __init__(self, in_channels, latent_size): super(MNIST_EncoderNew, self).__init__() self.in_channels = in_channels self.latent_size = latent_size self.linear1 = nn.Linear(self.in_channels, self.latent_size) self.linear2 = nn.Linear(self.latent_size, self.latent_size) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mdiephuis/adversarial-autoencoders
MNIST_Encoder
false
7,216
[ "MIT" ]
1
a722239564362796774de21a64fd92e81dce4089
https://github.com/mdiephuis/adversarial-autoencoders/tree/a722239564362796774de21a64fd92e81dce4089
MNIST_Generator
import torch import torch.nn as nn from torch.nn import functional as F class MNIST_Generator(nn.Module): def __init__(self, out_channels, latent_size): super(MNIST_Generator, self).__init__() self.out_channels = out_channels self.latent_size = latent_size self.linear1 = nn.Linear(self.latent_size, self.out_channels) self.linear2 = nn.Linear(self.out_channels, self.out_channels) def forward(self, x): x = F.leaky_relu(self.linear1(x), 0.2) x = F.leaky_relu(self.linear2(x), 0.2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), 
(1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(256)](buf3, primals_5, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_4 class MNIST_GeneratorNew(nn.Module): def __init__(self, out_channels, latent_size): super(MNIST_GeneratorNew, self).__init__() self.out_channels = out_channels self.latent_size = latent_size self.linear1 = nn.Linear(self.latent_size, self.out_channels) self.linear2 = nn.Linear(self.out_channels, self.out_channels) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mdiephuis/adversarial-autoencoders
MNIST_Generator
false
7,217
[ "MIT" ]
1
a722239564362796774de21a64fd92e81dce4089
https://github.com/mdiephuis/adversarial-autoencoders/tree/a722239564362796774de21a64fd92e81dce4089
Discriminator
import torch import torch.nn as nn from torch.nn import functional as F class Discriminator(nn.Module): def __init__(self, latent_size, d=128): super(Discriminator, self).__init__() self.latent_size = latent_size self.d = d self.linear1 = nn.Linear(self.latent_size, self.d) self.linear2 = nn.Linear(self.d, self.d) self.linear3 = nn.Linear(self.d, 1) def forward(self, x): x = F.leaky_relu(self.linear1(x), 0.2) x = F.leaky_relu(self.linear2(x), 0.2) x = torch.sigmoid(self.linear3(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(8192)](buf0, primals_2, buf1, buf2, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(8192)](buf3, primals_5, buf4, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf6) buf7 = 
reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_sigmoid_1[grid(64)](buf7, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 128), (128, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 128), (128, 1), 0 ), buf7, primals_6, primals_4 class DiscriminatorNew(nn.Module): def __init__(self, latent_size, d=128): super(DiscriminatorNew, self).__init__() self.latent_size = latent_size self.d = d self.linear1 = nn.Linear(self.latent_size, self.d) self.linear2 = nn.Linear(self.d, self.d) self.linear3 = nn.Linear(self.d, 1) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mdiephuis/adversarial-autoencoders
Discriminator
false
7,218
[ "MIT" ]
1
a722239564362796774de21a64fd92e81dce4089
https://github.com/mdiephuis/adversarial-autoencoders/tree/a722239564362796774de21a64fd92e81dce4089
MemoryEfficientPFLU
from torch.autograd import Function import torch from torch import nn class PFLUFunction(Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x * (1 + x / torch.sqrt(1 + x * x)) / 2 @staticmethod def backward(ctx, grad_output): x, = ctx.saved_tensors grad_x = None if ctx.needs_input_grad[0]: t = 1 / (1 + x * x) grad_x = grad_output * (1 + x * torch.sqrt(t) * (1 + t)) / 2 return grad_x class MemoryEfficientPFLU(nn.Module): def forward(self, x): return PFLUFunction.apply(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = libdevice.sqrt(tmp3) tmp5 = tmp0 / tmp4 tmp6 = tmp5 + tmp2 tmp7 = tmp0 * tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class PFLUFunction(Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x * (1 + x / torch.sqrt(1 + x * x)) / 2 @staticmethod def backward(ctx, grad_output): x, = ctx.saved_tensors grad_x = None if ctx.needs_input_grad[0]: t = 1 / (1 + x * x) grad_x = grad_output * (1 + x * torch.sqrt(t) * (1 + t)) / 2 return grad_x class MemoryEfficientPFLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mengzhu0308/PFLU-FPFLU
MemoryEfficientPFLU
false
7,219
[ "Apache-2.0" ]
1
628cd472db2913e555e902bdf35af834f84a284b
https://github.com/mengzhu0308/PFLU-FPFLU/tree/628cd472db2913e555e902bdf35af834f84a284b
FPFLU
import torch from torch import nn class FPFLU(nn.Module): def forward(self, x): return torch.maximum(x, x / (1 + x * x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_maximum_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tmp0 / tmp3 tmp5 = triton_helpers.maximum(tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_maximum_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FPFLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mengzhu0308/PFLU-FPFLU
FPFLU
false
7,220
[ "Apache-2.0" ]
1
628cd472db2913e555e902bdf35af834f84a284b
https://github.com/mengzhu0308/PFLU-FPFLU/tree/628cd472db2913e555e902bdf35af834f84a284b
WQ
import torch import torch.nn as nn def stats_quant(x, nbit, qmode='symm', dequantize=True): z_typical = {'4bit': [0.077, 1.013], '8bit': [0.027, 1.114]} z = z_typical[f'{int(nbit)}bit'] m = x.abs().mean() std = x.std() if qmode == 'symm': n_lv = 2 ** (nbit - 1) - 1 alpha_w = 1 / z[0] * std - z[1] / z[0] * m elif qmode == 'asymm': n_lv = (2 ** nbit - 1) / 2 alpha_w = 2 * m else: raise NotImplementedError x = x.clamp(-alpha_w.item(), alpha_w.item()) scale = n_lv / alpha_w xq = x.mul(scale).round() if len(xq.unique()) > 2 ** nbit: xq = xq.clamp(-2 ** nbit // 2, 2 ** nbit // 2 - 1) if dequantize: xq = xq.div(scale) return xq, scale class RoundQ(torch.autograd.Function): @staticmethod def forward(ctx, input, wbit, qmode): input_q, _scale = stats_quant(input, wbit, qmode) ctx.save_for_backward(input) return input_q @staticmethod def backward(ctx, grad_output): grad_input = grad_output.clone() return grad_input, None, None class WQ(nn.Module): """ Weight quantizer """ def __init__(self, wbit, qmode='symm'): super(WQ, self).__init__() self.wbit = wbit self.qmode = qmode def forward(self, x): weight_q = RoundQ.apply(x, self.wbit, self.qmode) return weight_q def extra_repr(self): return super(WQ, self).extra_repr() + 'qmode={}'.format(self.qmode) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'wbit': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_std_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tl_math.abs(tmp0) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 255.0 tmp19 = tmp13 / tmp18 tmp20 = libdevice.sqrt(tmp19) tmp21 = 12.987012987012987 tmp22 = tmp20 * tmp21 tmp23 = 256.0 tmp24 = tmp17 / tmp23 tmp25 = 13.155844155844155 tmp26 = tmp24 * tmp25 tmp27 = tmp22 - tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf4 = buf1 del buf1 get_raw_stream(0) triton_per_fused_abs_mean_mul_std_sub_0[grid(1)](buf4, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf4, def stats_quant(x, nbit, 
qmode='symm', dequantize=True): z_typical = {'4bit': [0.077, 1.013], '8bit': [0.027, 1.114]} z = z_typical[f'{int(nbit)}bit'] m = x.abs().mean() std = x.std() if qmode == 'symm': n_lv = 2 ** (nbit - 1) - 1 alpha_w = 1 / z[0] * std - z[1] / z[0] * m elif qmode == 'asymm': n_lv = (2 ** nbit - 1) / 2 alpha_w = 2 * m else: raise NotImplementedError x = x.clamp(-alpha_w.item(), alpha_w.item()) scale = n_lv / alpha_w xq = x.mul(scale).round() if len(xq.unique()) > 2 ** nbit: xq = xq.clamp(-2 ** nbit // 2, 2 ** nbit // 2 - 1) if dequantize: xq = xq.div(scale) return xq, scale class RoundQ(torch.autograd.Function): @staticmethod def forward(ctx, input, wbit, qmode): input_q, _scale = stats_quant(input, wbit, qmode) ctx.save_for_backward(input) return input_q @staticmethod def backward(ctx, grad_output): grad_input = grad_output.clone() return grad_input, None, None class WQNew(nn.Module): """ Weight quantizer """ def __init__(self, wbit, qmode='symm'): super(WQNew, self).__init__() self.wbit = wbit self.qmode = qmode def extra_repr(self): return super(WQNew, self).extra_repr() + 'qmode={}'.format(self.qmode) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mengjian0502/TorchInference_SRAM
WQ
false
7,221
[ "MIT" ]
1
fcc465c73b79f2ab670b6af03aa53f9bb47c64ca
https://github.com/mengjian0502/TorchInference_SRAM/tree/fcc465c73b79f2ab670b6af03aa53f9bb47c64ca
Coxnnet
import torch import numpy as np import torch.nn as nn class Coxnnet(nn.Module): def __init__(self, nfeat): super(Coxnnet, self).__init__() self.fc1 = nn.Linear(nfeat, int(np.ceil(nfeat ** 0.5))) self.dropout = nn.Dropout(0.5) self.fc2 = nn.Linear(int(np.ceil(nfeat ** 0.5)), 1) self.init_hidden() def forward(self, x, coo=None): x = torch.tanh(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x def init_hidden(self): nn.init.xavier_normal_(self.fc1.weight) nn.init.xavier_normal_(self.fc2.weight) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 2), (2, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), ( 2, 1), 0), reinterpret_tensor(primals_4, (2, 1), (1, 2), 0), alpha=1, beta=1, out=buf3) del primals_5 return 
reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4 class CoxnnetNew(nn.Module): def __init__(self, nfeat): super(CoxnnetNew, self).__init__() self.fc1 = nn.Linear(nfeat, int(np.ceil(nfeat ** 0.5))) self.dropout = nn.Dropout(0.5) self.fc2 = nn.Linear(int(np.ceil(nfeat ** 0.5)), 1) self.init_hidden() def init_hidden(self): nn.init.xavier_normal_(self.fc1.weight) nn.init.xavier_normal_(self.fc2.weight) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
menggerSherry/SAVAE-Cox
Coxnnet
false
7,222
[ "Apache-2.0" ]
1
c087ab4f267da28db7eb497c844bea59e65ed125
https://github.com/menggerSherry/SAVAE-Cox/tree/c087ab4f267da28db7eb497c844bea59e65ed125
MVNormalNetwork
import torch import torch.nn as nn class MVNormalNetwork(nn.Module): def __init__(self, latent_dim): super().__init__() self.mean = nn.Linear(latent_dim, latent_dim) self.sc = nn.Linear(latent_dim, latent_dim) def forward(self, x): mean = self.mean(x) sc = self.sc(x) return mean, torch.diag_embed(torch.exp(sc)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp3 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp0 = x0 tmp1 = x1 tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = 0.0 tmp6 = tl.where(tmp2, tmp4, tmp5) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) 
get_raw_stream(0) triton_poi_fused_diag_embed_0[grid(1024)](buf1, buf2, 1024, XBLOCK= 128, num_warps=4, num_stages=1) return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class MVNormalNetworkNew(nn.Module): def __init__(self, latent_dim): super().__init__() self.mean = nn.Linear(latent_dim, latent_dim) self.sc = nn.Linear(latent_dim, latent_dim) def forward(self, input_0): primals_1 = self.mean.weight primals_2 = self.mean.bias primals_4 = self.sc.weight primals_5 = self.sc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
mgb45/OC-notebooks
MVNormalNetwork
false
7,223
[ "MIT" ]
1
67b1899d1fb3455ab3caab58f94429b9f432164b
https://github.com/mgb45/OC-notebooks/tree/67b1899d1fb3455ab3caab58f94429b9f432164b
Conv1d_samePadding
import torch from torch import nn import torch.nn.functional as F class Conv1d_samePadding(nn.Conv1d): def __init__(self, *args, padding: int=0, **kwargs): assert padding == 0, "no additional padding on top of 'same' padding" kwargs['padding'] = 0 super().__init__(*args, **kwargs) def same_padding_1d(self, input): input_duration = input.size(2) filter_duration = self.weight.size(2) out_duration = (input_duration + self.stride[0] - 1) // self.stride[0] padding_duration = max(0, (out_duration - 1) * self.stride[0] + ( filter_duration - 1) * self.dilation[0] + 1 - input_duration) duration_odd = padding_duration % 2 input = F.pad(input, (padding_duration // 2, padding_duration // 2 + int(duration_odd))) return input def forward(self, input): input = self.same_padding_1d(input) return super().forward(input) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(112)](primals_1, buf0, 112, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, 
(4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0 class Conv1d_samePaddingNew(nn.Conv1d): def __init__(self, *args, padding: int=0, **kwargs): assert padding == 0, "no additional padding on top of 'same' padding" kwargs['padding'] = 0 super().__init__(*args, **kwargs) def same_padding_1d(self, input): input_duration = input.size(2) filter_duration = self.weight.size(2) out_duration = (input_duration + self.stride[0] - 1) // self.stride[0] padding_duration = max(0, (out_duration - 1) * self.stride[0] + ( filter_duration - 1) * self.dilation[0] + 1 - input_duration) duration_odd = padding_duration % 2 input = F.pad(input, (padding_duration // 2, padding_duration // 2 + int(duration_odd))) return input def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mgrachten/crepe-pytorch
Conv1d_samePadding
false
7,224
[ "MIT" ]
1
94305a78d2d82e414c251d50b63dc021af277c75
https://github.com/mgrachten/crepe-pytorch/tree/94305a78d2d82e414c251d50b63dc021af277c75
NALUCell
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.nn.parameter import Parameter class NeuralAccumulatorCell(nn.Module): """A Neural Accumulator (NAC) cell [1]. Attributes: in_dim: size of the input sample. out_dim: size of the output sample. Sources: [1]: https://arxiv.org/abs/1808.00508 """ def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.W_hat = Parameter(torch.Tensor(out_dim, in_dim)) self.M_hat = Parameter(torch.Tensor(out_dim, in_dim)) self.register_parameter('W_hat', self.W_hat) self.register_parameter('M_hat', self.M_hat) self.register_parameter('bias', None) self._reset_params() def _reset_params(self): init.kaiming_uniform_(self.W_hat) init.kaiming_uniform_(self.M_hat) def forward(self, input): W = torch.tanh(self.W_hat) * torch.sigmoid(self.M_hat) return F.linear(input, W, self.bias) def extra_repr(self): return 'in_dim={}, out_dim={}'.format(self.in_dim, self.out_dim) class NALUCell(nn.Module): """A Neural Arithmetic Logic Unit (NALU) cell [1]. Attributes: in_dim: size of the input sample. out_dim: size of the output sample. Sources: [1]: https://arxiv.org/abs/1808.00508 """ def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.eps = 1e-10 self.G = Parameter(torch.Tensor(out_dim, in_dim)) self.nac = NeuralAccumulatorCell(in_dim, out_dim) self.register_parameter('bias', None) init.kaiming_uniform_(self.G, a=math.sqrt(5)) def forward(self, input: 'torch.Tensor'): a = self.nac(input) g = F.linear(input, self.G, self.bias).sigmoid() add_sub = g * a log_input = (input.abs() + self.eps).log() m = self.nac(log_input).exp() mul_div = (1 - g) * m y = add_sub + mul_div return y def extra_repr(self): return 'in_dim={}, out_dim={}'.format(self.in_dim, self.out_dim) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_abs_add_log_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.abs(tmp0) tmp2 = 1e-10 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_rsub_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tmp3 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_abs_add_log_1[grid(256)](primals_3, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf4) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_exp_mul_rsub_sigmoid_2[grid(256)](buf2, buf1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4 ), (4, 1), 0), buf1, buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0 ), buf4 class NeuralAccumulatorCell(nn.Module): """A Neural Accumulator (NAC) cell [1]. Attributes: in_dim: size of the input sample. out_dim: size of the output sample. Sources: [1]: https://arxiv.org/abs/1808.00508 """ def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.W_hat = Parameter(torch.Tensor(out_dim, in_dim)) self.M_hat = Parameter(torch.Tensor(out_dim, in_dim)) self.register_parameter('W_hat', self.W_hat) self.register_parameter('M_hat', self.M_hat) self.register_parameter('bias', None) self._reset_params() def _reset_params(self): init.kaiming_uniform_(self.W_hat) init.kaiming_uniform_(self.M_hat) def forward(self, input): W = torch.tanh(self.W_hat) * torch.sigmoid(self.M_hat) return F.linear(input, W, self.bias) def extra_repr(self): return 'in_dim={}, out_dim={}'.format(self.in_dim, self.out_dim) class NALUCellNew(nn.Module): """A Neural Arithmetic Logic Unit (NALU) cell [1]. Attributes: in_dim: size of the input sample. out_dim: size of the output sample. 
Sources: [1]: https://arxiv.org/abs/1808.00508 """ def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.eps = 1e-10 self.G = Parameter(torch.Tensor(out_dim, in_dim)) self.nac = NeuralAccumulatorCell(in_dim, out_dim) self.register_parameter('bias', None) init.kaiming_uniform_(self.G, a=math.sqrt(5)) def extra_repr(self): return 'in_dim={}, out_dim={}'.format(self.in_dim, self.out_dim) def forward(self, input_0): primals_1 = self.G primals_2 = self.nac.W_hat primals_4 = self.nac.M_hat primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
mikomel/machine-number-sense
NALUCell
false
7,225
[ "MIT" ]
1
173b67e4f25bd8249ba4a41904d4cd4af26bae05
https://github.com/mikomel/machine-number-sense/tree/173b67e4f25bd8249ba4a41904d4cd4af26bae05
MHAttention
import math import torch from torch import nn import torch.nn.functional as F class MHAttention(nn.Module): def __init__(self, ninp, nhead, dropout): super(MHAttention, self).__init__() if ninp % nhead != 0: raise ValueError( 'The hidden size is not a multiple of the number of attention heads' ) self.nhead = nhead self.ninp = ninp self.fc_query = nn.Linear(ninp, ninp) self.fc_key = nn.Linear(ninp, ninp) self.fc_value = nn.Linear(ninp, ninp) self.dropout = nn.Dropout(dropout) def transpose_for_scores(self, x): """ x has shape (*, L, C) return shape (*, nhead, L, C/nhead) """ new_shape = x.shape[:-1] + (self.nhead, -1) x = x.view(*new_shape) return x.transpose(-3, -2) def forward_fn(self, x): """ x has shape (*, L, C) return shape (*, L, C) """ query = self.transpose_for_scores(self.fc_query(x)) key = self.transpose_for_scores(self.fc_key(x)) value = self.transpose_for_scores(self.fc_value(x)) attention_scores = torch.matmul(query, key.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.ninp / self.nhead) attention_weights = F.softmax(attention_scores, dim=-1) attention_weights = self.dropout(attention_weights) x = torch.matmul(attention_weights, value) x = x.transpose(-3, -2) x = x.reshape(*x.shape[:-2], -1) return x def forward(self, x): chunk_size = 100000 // x.shape[2] outputs = [] for i in range(0, x.shape[1], chunk_size): ed = min(i + chunk_size, x.shape[1]) partial = self.forward_fn(x[:, i:ed]) outputs.append(partial) return torch.cat(outputs, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'nhead': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr 
): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args 
args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(256)](buf0, primals_3, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(256)](buf1, primals_5, buf4, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(1024)](buf5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(1024)](buf5, buf6, buf7, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 4, 
1), (64, 16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(256)](buf2, primals_7, buf8, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (64, 4, 1), (4, 1, 0), 0), out=buf9) return reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 1, 4), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0) class MHAttentionNew(nn.Module): def __init__(self, ninp, nhead, dropout): super(MHAttentionNew, self).__init__() if ninp % nhead != 0: raise ValueError( 'The hidden size is not a multiple of the number of attention heads' ) self.nhead = nhead self.ninp = ninp self.fc_query = nn.Linear(ninp, ninp) self.fc_key = nn.Linear(ninp, ninp) self.fc_value = nn.Linear(ninp, ninp) self.dropout = nn.Dropout(dropout) def transpose_for_scores(self, x): """ x has shape (*, L, C) return shape (*, nhead, L, C/nhead) """ new_shape = x.shape[:-1] + (self.nhead, -1) x = x.view(*new_shape) return x.transpose(-3, -2) def forward_fn(self, x): """ x has shape (*, L, C) return shape (*, L, C) """ query = self.transpose_for_scores(self.fc_query(x)) key = self.transpose_for_scores(self.fc_key(x)) value = self.transpose_for_scores(self.fc_value(x)) attention_scores = torch.matmul(query, key.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.ninp / self.nhead) attention_weights = F.softmax(attention_scores, dim=-1) attention_weights = self.dropout(attention_weights) x = torch.matmul(attention_weights, value) x = x.transpose(-3, -2) x = x.reshape(*x.shape[:-2], -1) return x def forward(self, input_0): primals_2 = self.fc_query.weight primals_3 = self.fc_query.bias primals_4 = self.fc_key.weight primals_5 = self.fc_key.bias primals_6 = self.fc_value.weight 
primals_7 = self.fc_value.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
microsoft/Protein-Folding
MHAttention
false
7,226
[ "MIT" ]
1
f534b2dd1e3f192fbcdadf234f25828c7f458a58
https://github.com/microsoft/Protein-Folding/tree/f534b2dd1e3f192fbcdadf234f25828c7f458a58
FeedForward
import torch from torch import nn class FeedForward(nn.Module): def __init__(self, ninp, dim_feedforward, dropout): super(FeedForward, self).__init__() self.linear1 = nn.Linear(ninp, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, ninp) self.norm1 = nn.LayerNorm(ninp) self.norm2 = nn.LayerNorm(ninp) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = nn.ReLU() def forward_fn(self, x, branch): x = x + self.dropout1(branch) x = self.norm1(x) branch = self.linear2(self.dropout(self.activation(self.linear1(x)))) x = x + self.dropout2(branch) x = self.norm2(x) return x def forward(self, x, branch): chunk_size = 100000 // x.shape[2] outputs = [] for i in range(0, x.shape[1], chunk_size): ed = min(i + chunk_size, x.shape[1]) partial = self.forward_fn(x[:, i:ed], branch[:, i:ed]) outputs.append(partial) return torch.cat(outputs, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'dim_feedforward': 4, 'dropout': 0.5}]
# Auto-generated TorchInductor lowering of FeedForward (CUDA-only path:
# every kernel launch sits under torch.cuda._DeviceGuard).  Shapes and
# strides are specialized to the (4, 4, 4, 4) example inputs.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


# Per 4-element row of (in_ptr0 + in_ptr1): writes the mean to out_ptr0 and
# the biased variance (sum of squared deviations / 4) to out_ptr1.
@triton.jit
def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


# Finishes the first layer norm: normalizes (in_ptr0 + in_ptr1) with the
# per-row mean/variance (in_ptr2/in_ptr3), then applies the per-column
# affine scale (in_ptr4) and shift (in_ptr5).  Stores both the bare
# normalized value (out_ptr0, kept for backward) and the affine result
# (out_ptr1).
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp9, xmask)
    tl.store(out_ptr1 + x2, tmp13, xmask)


# Adds the per-column bias (in_ptr0) to the matmul output in place, applies
# ReLU, and records the <=0 mask (out_ptr0) that the backward pass needs.
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


# Second residual: in-place computes in_ptr0 + (in_out_ptr0 + per-column
# bias in_ptr1).
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


# Per 4-element row of in_ptr0: writes the mean (out_ptr0) and
# rsqrt(variance + 1e-05) (out_ptr1) for the second layer norm.
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


# Applies the second layer norm: (x - mean) * rstd * weight + bias using the
# statistics produced by kernel 4.
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    # Orchestrates the fused forward pass; besides the final output it
    # returns the intermediates Inductor saved for the backward pass.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        # First residual + layer-norm statistics.
        triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1,
            primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # NOTE(review): primals_* numbering follows graph-trace order, not
        # module attribute order — forward() maps self.linear1.bias into
        # primals_3, which this kernel reads as the norm scale slot.
        # Presumably consistent with the generating trace; verify if
        # outputs diverge from the eager module.
        triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1,
            primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        del primals_3
        del primals_4
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # First linear layer as a cuBLAS matmul (bias fused into kernel 2).
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(256)](buf5,
            primals_6, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Second linear layer (bias fused into kernel 3).
        extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
        buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf6
        triton_poi_fused_add_3[grid(256)](buf7, buf3, primals_8, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_8
        # Reuse the first norm's scratch buffers for the second norm's stats.
        buf8 = buf1
        del buf1
        buf9 = buf0
        del buf0
        triton_poi_fused_native_layer_norm_4[grid(64)](buf7, buf8, buf9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_5[grid(256)](buf7, buf8, buf9,
            primals_9, primals_10, buf10, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf8
        del buf9
        del primals_10
    return buf10, primals_9, buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf5, (64, 4), (4, 1), 0
        ), buf7, primals_7, buf11, primals_5


class FeedForwardNew(nn.Module):
    """Drop-in replacement for FeedForward whose forward() runs the fused
    Inductor `call` path instead of the eager ops.  Note that the dropout
    modules are constructed but not used on this compiled path, and the
    original chunked forward() is kept only as forward_fn."""

    def __init__(self, ninp, dim_feedforward, dropout):
        super(FeedForwardNew, self).__init__()
        self.linear1 = nn.Linear(ninp, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, ninp)
        self.norm1 = nn.LayerNorm(ninp)
        self.norm2 = nn.LayerNorm(ninp)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = nn.ReLU()

    def forward_fn(self, x, branch):
        # Original eager implementation, retained for reference.
        x = x + self.dropout1(branch)
        x = self.norm1(x)
        branch = self.linear2(self.dropout(self.activation(self.linear1(x))))
        x = x + self.dropout2(branch)
        x = self.norm2(x)
        return x

    def forward(self, input_0, input_1):
        # Pack parameters and inputs in the order the traced graph expects.
        primals_5 = self.linear1.weight
        primals_3 = self.linear1.bias
        primals_7 = self.linear2.weight
        primals_4 = self.linear2.bias
        primals_6 = self.norm1.weight
        primals_8 = self.norm1.bias
        primals_9 = self.norm2.weight
        primals_10 = self.norm2.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
        return output[0]
microsoft/Protein-Folding
FeedForward
false
7,227
[ "MIT" ]
1
f534b2dd1e3f192fbcdadf234f25828c7f458a58
https://github.com/microsoft/Protein-Folding/tree/f534b2dd1e3f192fbcdadf234f25828c7f458a58
NeuralAccumulatorCell
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter


class NeuralAccumulatorCell(nn.Module):
    """A Neural Accumulator (NAC) cell [1].

    The effective weight matrix is softly constrained towards {-1, 0, 1}
    by composing tanh of one unconstrained matrix with sigmoid of another.

    Attributes:
        in_dim: size of the input sample.
        out_dim: size of the output sample.

    Sources:
        [1]: https://arxiv.org/abs/1808.00508
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.W_hat = Parameter(torch.Tensor(out_dim, in_dim))
        self.M_hat = Parameter(torch.Tensor(out_dim, in_dim))
        self.register_parameter('W_hat', self.W_hat)
        self.register_parameter('M_hat', self.M_hat)
        self.register_parameter('bias', None)
        self._reset_params()

    def _reset_params(self):
        init.kaiming_uniform_(self.W_hat)
        init.kaiming_uniform_(self.M_hat)

    def forward(self, input):
        # Magnitude/sign term from W_hat, gating term from M_hat.
        sign = torch.tanh(self.W_hat)
        gate = torch.sigmoid(self.M_hat)
        return F.linear(input, sign * gate, self.bias)

    def extra_repr(self):
        return 'in_dim={}, out_dim={}'.format(self.in_dim, self.out_dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'out_dim': 4}]
# Auto-generated TorchInductor lowering of NeuralAccumulatorCell
# (CUDA-only path), specialized to 4x4 weights and (4, 4, 4, 4) input.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import init
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


# Elementwise tanh(W_hat) * sigmoid(M_hat) — the constrained NAC weight.
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp3 = tl.sigmoid(tmp2)
    tmp4 = tmp1 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    # Builds the effective weight, then applies it as a single matmul over
    # the input flattened to (64, 4).  Extra return values are the tensors
    # saved for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2,
            buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # F.linear(input, W) as input @ W.T via a stride-swapped view of buf0.
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
        del buf0
    return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4), (4,
        1), 0)


class NeuralAccumulatorCellNew(nn.Module):
    """A Neural Accumulator (NAC) cell [1].

    Drop-in replacement for NeuralAccumulatorCell whose forward() runs the
    fused Inductor `call` path.

    Attributes:
        in_dim: size of the input sample.
        out_dim: size of the output sample.

    Sources:
        [1]: https://arxiv.org/abs/1808.00508
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.W_hat = Parameter(torch.Tensor(out_dim, in_dim))
        self.M_hat = Parameter(torch.Tensor(out_dim, in_dim))
        self.register_parameter('W_hat', self.W_hat)
        self.register_parameter('M_hat', self.M_hat)
        self.register_parameter('bias', None)
        self._reset_params()

    def _reset_params(self):
        init.kaiming_uniform_(self.W_hat)
        init.kaiming_uniform_(self.M_hat)

    def extra_repr(self):
        return 'in_dim={}, out_dim={}'.format(self.in_dim, self.out_dim)

    def forward(self, input_0):
        primals_1 = self.W_hat
        primals_2 = self.M_hat
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
mikomel/machine-number-sense
NeuralAccumulatorCell
false
7,228
[ "MIT" ]
1
173b67e4f25bd8249ba4a41904d4cd4af26bae05
https://github.com/mikomel/machine-number-sense/tree/173b67e4f25bd8249ba4a41904d4cd4af26bae05
Conv3x3
import torch
import torch.nn as nn


class Conv3x3(nn.Module):
    """3x3 convolution that keeps the spatial size by padding first.

    Uses reflection padding by default, zero padding otherwise.
    """

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        # 1-pixel border compensates for the 3x3 kernel's size reduction.
        self.pad = nn.ReflectionPad2d(1) if use_refl else nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        return self.conv(self.pad(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
# Auto-generated TorchInductor lowering of Conv3x3 (CUDA-only path),
# specialized to (4, 4, 4, 4) input and a 4->4-channel 3x3 kernel.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# ReflectionPad2d(1): maps each padded (6x6) coordinate back into the 4x4
# source plane with the nested-abs mirror-index formula.
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


# In-place per-channel bias add after the external convolution.
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    # pad -> cuDNN/extern convolution (bias deferred) -> fused bias add.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class Conv3x3New(nn.Module):
    """Layer to pad and convolve input

    Drop-in replacement for Conv3x3 whose forward() runs the fused
    Inductor `call` path.  Note the compiled path always uses the
    reflection-pad kernel; the self.pad module chosen in __init__ is not
    consulted by forward().
    """

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3New, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
minjabenho/image2pcl
Conv3x3
false
7,229
[ "Apache-2.0" ]
1
7e696ee48edae30814d32f32e605ad6cf8bf702c
https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c
fadein_layer
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.data


class fadein_layer(nn.Module):
    """Linear cross-fade between two stacked tensors.

    `x` holds the two tensors along dim 0; the output blends them as
    (1 - alpha) * x[0] + alpha * x[1], with alpha clamped to [0, 1].
    """

    def __init__(self, config):
        super(fadein_layer, self).__init__()
        self.alpha = 0.0

    def update_alpha(self, delta):
        # Accumulate, then clamp via the shared setter.
        self.set_alpha(self.alpha + delta)

    def set_alpha(self, value):
        self.alpha = max(0, min(value, 1.0))

    def forward(self, x):
        blend = self.alpha
        return torch.add(x[0].mul(1.0 - blend), x[1].mul(blend))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config()}]
# Auto-generated TorchInductor lowering of fadein_layer (CUDA-only path),
# specialized to a (4, 4, 4, 4) input blended along dim 0.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# x[0] * 1.0 + x[1] * 0.0 — alpha was 0.0 at trace time and is baked into
# the kernel as the constants tmp1/tmp4, so later set_alpha/update_alpha
# calls have no effect on this compiled path.
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = 0.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    # Single fused kernel producing the (4, 4, 4) blend of x[0] and x[1].
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class fadein_layerNew(nn.Module):
    """Drop-in replacement for fadein_layer whose forward() runs the fused
    Inductor `call` path (with alpha frozen at its trace-time value 0.0;
    see the kernel note above)."""

    def __init__(self, config):
        super(fadein_layerNew, self).__init__()
        self.alpha = 0.0

    def update_alpha(self, delta):
        self.alpha = self.alpha + delta
        self.alpha = max(0, min(self.alpha, 1.0))

    def set_alpha(self, value):
        self.alpha = max(0, min(value, 1.0))

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
mingo-x/pggan-pytorch
fadein_layer
false
7,230
[ "MIT" ]
1
a1dde73cd4df52476fe7c948d81fa9caea8070a5
https://github.com/mingo-x/pggan-pytorch/tree/a1dde73cd4df52476fe7c948d81fa9caea8070a5
pixelwise_norm_layer
import torch
import torch.nn as nn
import torch.utils.data


class pixelwise_norm_layer(nn.Module):
    """Pixelwise feature normalization (as in PGGAN): divide each spatial
    position's feature vector by the RMS of its channel values."""

    def __init__(self):
        super(pixelwise_norm_layer, self).__init__()
        # Small constant keeps the division stable for all-zero features.
        self.eps = 1e-08

    def forward(self, x):
        mean_sq = torch.mean(x ** 2, dim=1, keepdim=True)
        return x / (mean_sq + self.eps) ** 0.5


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
# Auto-generated TorchInductor lowering of pixelwise_norm_layer
# (CUDA-only path), specialized to a (4, 4, 4, 4) input with 4 channels.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# For each element, loads its 4 channel siblings (stride 16 apart), forms
# mean(x^2) over the channel dim, and divides by sqrt(mean + 1e-08).
@triton.jit
def triton_poi_fused_add_div_mean_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.sqrt(tmp15)
    tmp17 = tmp0 / tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    # Single fused elementwise kernel; no intermediates are kept.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_pow_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class pixelwise_norm_layerNew(nn.Module):
    """Drop-in replacement for pixelwise_norm_layer whose forward() runs
    the fused Inductor `call` path."""

    def __init__(self):
        super(pixelwise_norm_layerNew, self).__init__()
        self.eps = 1e-08

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
mingo-x/pggan-pytorch
pixelwise_norm_layer
false
7,231
[ "MIT" ]
1
a1dde73cd4df52476fe7c948d81fa9caea8070a5
https://github.com/mingo-x/pggan-pytorch/tree/a1dde73cd4df52476fe7c948d81fa9caea8070a5
equalized_conv2d
import torch
import torch.nn as nn
from torch.nn.init import normal
import torch.utils.data


def _calculate_fan_in_and_fan_out(tensor):
    """Return (fan_in, fan_out) for a weight tensor of rank >= 2."""
    ndim = tensor.ndimension()
    if ndim < 2:
        raise ValueError(
            'Fan in and fan out can not be computed for tensor with less than 2 dimensions'
            )
    if ndim == 2:
        return tensor.size(1), tensor.size(0)
    receptive_field_size = tensor[0][0].numel() if tensor.dim() > 2 else 1
    fan_in = tensor.size(1) * receptive_field_size
    fan_out = tensor.size(0) * receptive_field_size
    return fan_in, fan_out


class equalized_conv2d(nn.Module):
    """Conv2d with equalized learning rate (PGGAN): weights are drawn from
    a unit normal and rescaled at runtime by a He-style constant; the bias
    is kept as a separate zero-initialized parameter added after the conv.
    """

    def __init__(self, c_in, c_out, k_size, stride, pad, initializer=
        'kaiming', bias=False, a=0.0):
        super(equalized_conv2d, self).__init__()
        self.conv = nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)
        if initializer == 'kaiming':
            # NOTE(review): torch.nn.init.normal is the deprecated alias of
            # normal_; kept to preserve the module's public import surface.
            normal(self.conv.weight)
            fan_in, _ = _calculate_fan_in_and_fan_out(self.conv.weight)
            gain = (2.0 / (1.0 + a ** 2)) ** 0.5
            self.scale = gain / fan_in ** 0.5
        self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))

    def forward(self, x):
        out = self.conv(x.mul(self.scale))
        return out + self.bias.view(1, -1, 1, 1).expand_as(out)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'c_in': 4, 'c_out': 4, 'k_size': 4, 'stride': 1, 'pad': 4}]
# Auto-generated TorchInductor lowering of equalized_conv2d (CUDA-only
# path), specialized to 4x4x4x4 operands with padding 4 (output 9x9).
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.init import normal
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# Elementwise multiply by the trace-time scale constant
# 0.1767766952966369 (= 2**0.5 / 64**0.5 for the example's fan_in of 64).
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.1767766952966369
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


# In-place per-channel bias add over the (4, 4, 9, 9) conv output.
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1296
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 81 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    # scale -> extern convolution -> fused bias add.
    # NOTE(review): forward() maps primals_1 = self.conv.weight and
    # primals_2 = input_0, yet the scaled primals_1 is passed as the
    # convolution *input* and primals_2 as the *filter*.  Both operands are
    # 4x4x4x4 here so the shapes check out; presumably this mirrors the
    # traced graph (scaling the weight is linear-equivalent to scaling the
    # input), but verify the operand roles against the eager module.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 9, 9), (324, 81, 9, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_add_1[grid(1296)](buf2, primals_3, 1296, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


def _calculate_fan_in_and_fan_out(tensor):
    # Standard fan computation: fan_in/fan_out scaled by the receptive
    # field size for conv-style (rank > 2) weights.
    dimensions = tensor.ndimension()
    if dimensions < 2:
        raise ValueError(
            'Fan in and fan out can not be computed for tensor with less than 2 dimensions'
            )
    if dimensions == 2:
        fan_in = tensor.size(1)
        fan_out = tensor.size(0)
    else:
        num_input_fmaps = tensor.size(1)
        num_output_fmaps = tensor.size(0)
        receptive_field_size = 1
        if tensor.dim() > 2:
            receptive_field_size = tensor[0][0].numel()
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size
    return fan_in, fan_out


class equalized_conv2dNew(nn.Module):
    """Drop-in replacement for equalized_conv2d whose forward() runs the
    fused Inductor `call` path (with the runtime scale frozen into the
    kernel as a constant)."""

    def __init__(self, c_in, c_out, k_size, stride, pad, initializer=
        'kaiming', bias=False, a=0.0):
        super(equalized_conv2dNew, self).__init__()
        self.conv = nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)
        if initializer == 'kaiming':
            normal(self.conv.weight)
            fan_in, _ = _calculate_fan_in_and_fan_out(self.conv.weight)
            gain = (2.0 / (1.0 + a ** 2)) ** 0.5
            self.scale = gain / fan_in ** 0.5
        self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))

    def forward(self, input_0):
        primals_3 = self.bias
        primals_1 = self.conv.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
mingo-x/pggan-pytorch
equalized_conv2d
false
7,232
[ "MIT" ]
1
a1dde73cd4df52476fe7c948d81fa9caea8070a5
https://github.com/mingo-x/pggan-pytorch/tree/a1dde73cd4df52476fe7c948d81fa9caea8070a5
ParityPonderGRU
from torch.nn import Module
import torch
from torch import nn
from typing import Tuple
import torch.utils.data
import torch.nn.functional
import torch.autograd


class ParityPonderGRU(Module):
    """
    ## PonderNet with GRU for Parity Task

    A [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
    is used as the step function and iterated up to `max_steps` times.  At
    each step the model emits a prediction (log probability of the parity
    being 1) and a halting probability lambda.  The input for the
    [Parity Task](../parity.html) is a vector of `n_elems` entries in
    {-1, 0, 1}; the target is whether the count of 1s is odd.
    """

    def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
        """
        * `n_elems` is the number of elements in the input vector
        * `n_hidden` is the state vector size of the GRU
        * `max_steps` is the maximum number of steps $N$
        """
        super().__init__()
        self.max_steps = max_steps
        self.n_hidden = n_hidden
        self.gru = nn.GRUCell(n_elems, n_hidden)
        self.output_layer = nn.Linear(n_hidden, 1)
        self.lambda_layer = nn.Linear(n_hidden, 1)
        self.lambda_prob = nn.Sigmoid()
        self.is_halt = False

    def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.
        Tensor, torch.Tensor, torch.Tensor]:
        """
        * `x` is the input of shape `[batch_size, n_elems]`

        Returns a tuple of four tensors:

        1. halting probabilities $p_1 \\dots p_N$, shape `[N, batch_size]`
        2. per-step predictions $\\hat{y}_1 \\dots \\hat{y}_N$,
           shape `[N, batch_size]`
        3. $p_m$ of shape `[batch_size]`
        4. $\\hat{y}_m$ of shape `[batch_size]`

        where $m$ is the stochastically sampled halting step.
        """
        batch_size = x.shape[0]
        h = x.new_zeros((x.shape[0], self.n_hidden))
        h = self.gru(x, h)
        step_probs = []
        step_preds = []
        un_halted_prob = h.new_ones((batch_size,))
        halted = h.new_zeros((batch_size,))
        p_m = h.new_zeros((batch_size,))
        y_m = h.new_zeros((batch_size,))
        for step in range(1, self.max_steps + 1):
            # Halting is forced on the final step; otherwise it is the
            # learned probability.
            lambda_n = h.new_ones(h.shape[0]) if step == self.max_steps \
                else self.lambda_prob(self.lambda_layer(h))[:, 0]
            y_n = self.output_layer(h)[:, 0]
            p_n = un_halted_prob * lambda_n
            un_halted_prob = un_halted_prob * (1 - lambda_n)
            # Sample which still-running examples halt at this step.
            halt = torch.bernoulli(lambda_n) * (1 - halted)
            step_probs.append(p_n)
            step_preds.append(y_n)
            # Latch (p_n, y_n) for examples that halt now.
            p_m = p_m * (1 - halt) + p_n * halt
            y_m = y_m * (1 - halt) + y_n * halt
            halted = halted + halt
            h = self.gru(x, h)
            if self.is_halt and halted.sum() == batch_size:
                break
        return torch.stack(step_probs), torch.stack(step_preds), p_m, y_m


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'n_elems': 4, 'n_hidden': 4, 'max_steps': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + x0, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + x0, tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 5, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr1 + x0, tmp24 & xmask, eviction_policy= 'evict_last', 
other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 6, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + x0, tmp29 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tmp0 >= tmp27 tl.full([1], 7, tl.int64) tmp34 = tl.load(in_ptr1 + x0, tmp31 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 7 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp23 = tl.load(in_ptr1 + 0) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tmp0 >= tmp3 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tmp0 >= tmp8 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp0 >= tmp12 tmp16 = tl.full([1], 4, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tmp0 >= tmp16 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp25 = tmp0 >= tmp20 tmp26 = tl.full([1], 6, tl.int64) tmp27 = tmp0 < tmp26 tmp28 = tmp25 & tmp27 tl.full([1], 7, tl.int64) tmp32 = tl.where(tmp28, tmp24, tmp24) tmp33 = tl.where(tmp22, tmp24, tmp32) tmp34 = tl.where(tmp18, tmp6, tmp33) tmp35 = tl.where(tmp14, tmp6, tmp34) tmp36 = tl.where(tmp10, tmp6, tmp35) tmp37 = tl.where(tmp4, tmp6, tmp36) tl.store(out_ptr0 + x0, tmp37, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (x0 + 4 * (-16 + x1)), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 24, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + (x0 + 4 * (-20 + x1)), tmp29 & xmask, other=0.0) tmp31 = tmp0 >= tmp27 tl.full([1], 28, tl.int64) tmp34 = tl.load(in_ptr2 + (x0 + 4 * (-24 + x1)), tmp31 & xmask, other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 4) tmp4 = tl.broadcast_to(tmp3, 
[XBLOCK]) tmp5 = tl.load(in_ptr2 + (16 + x0), xmask) tmp8 = tl.load(in_ptr1 + 5) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (20 + x0), xmask) tmp13 = tl.load(in_ptr1 + 6) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp15 = tl.load(in_ptr2 + (24 + x0), xmask) tmp18 = tl.load(in_ptr3 + x0, xmask) tmp20 = tl.load(in_ptr4 + x0, xmask) tmp22 = tl.load(in_ptr5 + x0, xmask) tmp59 = tl.load(in_ptr1 + 0) tmp60 = tl.broadcast_to(tmp59, [XBLOCK]) tmp61 = tl.load(in_ptr2 + x0, xmask) tmp66 = tl.load(in_ptr1 + 1) tmp67 = tl.broadcast_to(tmp66, [XBLOCK]) tmp68 = tl.load(in_ptr2 + (4 + x0), xmask) tmp73 = tl.load(in_ptr1 + 2) tmp74 = tl.broadcast_to(tmp73, [XBLOCK]) tmp75 = tl.load(in_ptr2 + (8 + x0), xmask) tmp80 = tl.load(in_ptr1 + 3) tmp81 = tl.broadcast_to(tmp80, [XBLOCK]) tmp82 = tl.load(in_ptr2 + (12 + x0), xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp11 = tmp9 + tmp10 tmp12 = tl.sigmoid(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp19 = tmp18 < tmp17 tmp21 = tmp20 < tmp12 tmp23 = tmp22 < tmp7 tmp24 = tmp23.to(tl.float32) tmp25 = tmp1 - tmp24 tmp26 = 0.0 tmp27 = tmp26 * tmp25 tmp28 = tmp7 * tmp24 tmp29 = tmp27 + tmp28 tmp30 = tmp21.to(tl.float32) tmp31 = tmp30 * tmp25 tmp32 = tmp1 - tmp31 tmp33 = tmp29 * tmp32 tmp34 = tmp1 - tmp7 tmp35 = tmp34 * tmp12 tmp36 = tmp35 * tmp31 tmp37 = tmp33 + tmp36 tmp38 = tmp19.to(tl.float32) tmp39 = tmp24 + tmp31 tmp40 = tmp1 - tmp39 tmp41 = tmp38 * tmp40 tmp42 = tmp1 - tmp41 tmp43 = tmp37 * tmp42 tmp44 = tmp1 - tmp12 tmp45 = tmp34 * tmp44 tmp46 = tmp45 * tmp17 tmp47 = tmp46 * tmp41 tmp48 = tmp43 + tmp47 tmp49 = tmp2.to(tl.float32) tmp50 = tmp39 + tmp41 tmp51 = tmp1 - tmp50 tmp52 = tmp49 * tmp51 tmp53 = tmp1 - tmp52 tmp54 = tmp48 * tmp53 tmp55 = tmp1 - tmp17 tmp56 = tmp45 * tmp55 tmp57 = tmp56 * tmp52 tmp58 = tmp54 + tmp57 tmp62 = tmp60 + tmp61 tmp63 = tmp62 * tmp24 tmp64 = tmp27 + tmp63 tmp65 = tmp64 * tmp32 tmp69 = tmp67 + tmp68 tmp70 = tmp69 * tmp31 tmp71 = tmp65 + tmp70 
tmp72 = tmp71 * tmp42 tmp76 = tmp74 + tmp75 tmp77 = tmp76 * tmp41 tmp78 = tmp72 + tmp77 tmp79 = tmp78 * tmp53 tmp83 = tmp81 + tmp82 tmp84 = tmp83 * tmp52 tmp85 = tmp79 + tmp84 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr2 + x0, tmp12, xmask) tl.store(out_ptr3 + x0, tmp17, xmask) tl.store(out_ptr4 + x0, tmp19, xmask) tl.store(out_ptr5 + x0, tmp21, xmask) tl.store(out_ptr6 + x0, tmp23, xmask) tl.store(in_out_ptr0 + x0, tmp58, xmask) tl.store(in_out_ptr1 + x0, tmp85, xmask) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = 1.0 tmp12 = tmp11 - tmp10 tmp13 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 * tmp13 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tmp0 >= tmp7 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr0 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp11 - tmp21 tmp23 = tl.load(in_ptr1 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp11 - tmp23 tmp25 = tmp22 * tmp24 tmp26 = tl.load(in_ptr2 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tmp0 >= tmp18 tl.full([1], 16, tl.int64) tmp33 = tl.load(in_ptr0 + (-12 + x0), tmp30 & 
xmask, eviction_policy= 'evict_last', other=0.0) tmp34 = tmp11 - tmp33 tmp35 = tl.load(in_ptr1 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tmp11 - tmp35 tmp37 = tmp34 * tmp36 tmp38 = tl.load(in_ptr2 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp39 = tmp11 - tmp38 tmp40 = tmp37 * tmp39 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp30, tmp40, tmp41) tmp43 = tl.where(tmp20, tmp29, tmp42) tmp44 = tl.where(tmp9, tmp16, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + x0, tmp45, xmask) @triton.jit def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp15 = tl.load(in_ptr0 + 1) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp25 = tl.load(in_ptr0 + 2) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp34 = tl.load(in_ptr0 + 3) tmp35 = tl.broadcast_to(tmp34, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp17 = tl.load(in_ptr1 + (4 + (-4 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp27 = tl.load(in_ptr1 + (8 + (-8 + x0)), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp26 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 
tl.full([1], 16, tl.int64) tmp36 = tl.load(in_ptr1 + (12 + (-12 + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tl.store(out_ptr0 + x0, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (12,), (1,)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2) buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0, primals_4, primals_5) buf4 = buf3[0] buf5 = buf3[1] del buf3 buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32) triton_poi_fused_stack_1[grid(28)](primals_8, primals_6, buf6, 28, XBLOCK=32, num_warps=1, num_stages=1) del primals_6 del primals_8 buf7 = empty_strided_cuda((7,), (1,), torch.float32) triton_poi_fused_stack_2[grid(7)](primals_9, primals_7, buf7, 7, XBLOCK=8, num_warps=1, num_stages=1) del primals_7 del primals_9 buf8 = buf2 del buf2 extern_kernels.mm(buf4, 
reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf8) buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4, primals_4, primals_5) buf10 = buf9[0] buf11 = buf9[1] del buf9 buf12 = buf8 del buf8 extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf12) buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12, buf10, primals_4, primals_5) buf14 = buf13[0] buf15 = buf13[1] del buf13 buf16 = buf12 del buf12 extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf16) buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16, buf14, primals_4, primals_5) del buf1 del buf16 del primals_4 del primals_5 buf18 = buf17[0] buf19 = buf17[1] del buf17 buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32) triton_poi_fused_stack_3[grid(112)](buf4, buf10, buf14, buf18, buf20, 112, XBLOCK=128, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21) buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf24 = buf23 del buf23 buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf28 = buf27 del buf27 buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf32 = buf31 del buf31 buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4,), (1,), torch.bool) buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf25 = empty_strided_cuda((4,), (1,), torch.bool) buf29 = empty_strided_cuda((4,), (1,), torch.bool) 
buf33 = empty_strided_cuda((4,), (1,), torch.bool) buf34 = empty_strided_cuda((4,), (1,), torch.float32) buf39 = buf34 del buf34 buf35 = empty_strided_cuda((4,), (1,), torch.float32) buf40 = buf35 del buf35 triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4[ grid(4)](buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32, buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf24 del buf28 del buf32 del buf37 buf41 = reinterpret_tensor(buf18, (16,), (1,), 0) del buf18 triton_poi_fused_stack_5[grid(16)](buf30, buf26, buf22, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_stack_6[grid(16)](buf7, buf21, buf42, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf21 del buf7 return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0), reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40, primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19, buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor (buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4 ), (16, 1, 4), 0), primals_3) class ParityPonderGRUNew(Module): """ ## PonderNet with GRU for Parity Task This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html) as the step function. This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`. Each element of the vector is either `0`, `1` or `-1` and the output is the parity - a binary value that is true if the number of `1`s is odd and false otherwise. The prediction of the model is the log probability of the parity being $1$. 
""" def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'): """ * `n_elems` is the number of elements in the input vector * `n_hidden` is the state vector size of the GRU * `max_steps` is the maximum number of steps $N$ """ super().__init__() self.max_steps = max_steps self.n_hidden = n_hidden self.gru = nn.GRUCell(n_elems, n_hidden) self.output_layer = nn.Linear(n_hidden, 1) self.lambda_layer = nn.Linear(n_hidden, 1) self.lambda_prob = nn.Sigmoid() self.is_halt = False def forward(self, input_0): primals_2 = self.gru.weight_ih primals_3 = self.gru.weight_hh primals_4 = self.gru.bias_ih primals_5 = self.gru.bias_hh primals_6 = self.output_layer.weight primals_7 = self.output_layer.bias primals_8 = self.lambda_layer.weight primals_9 = self.lambda_layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1], output[2], output[3]
mcx/annotated_deep_learning_paper_implementations
ParityPonderGRU
false
7,233
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
equalized_linear
import torch import torch.nn as nn from torch.nn.init import normal import torch.utils.data def _calculate_fan_in_and_fan_out(tensor): dimensions = tensor.ndimension() if dimensions < 2: raise ValueError( 'Fan in and fan out can not be computed for tensor with less than 2 dimensions' ) if dimensions == 2: fan_in = tensor.size(1) fan_out = tensor.size(0) else: num_input_fmaps = tensor.size(1) num_output_fmaps = tensor.size(0) receptive_field_size = 1 if tensor.dim() > 2: receptive_field_size = tensor[0][0].numel() fan_in = num_input_fmaps * receptive_field_size fan_out = num_output_fmaps * receptive_field_size return fan_in, fan_out class equalized_linear(nn.Module): def __init__(self, c_in, c_out, initializer='kaiming', a=1.0, reshape=False ): super(equalized_linear, self).__init__() self.linear = nn.Linear(c_in, c_out, bias=False) if initializer == 'kaiming': normal(self.linear.weight) fan_in, _ = _calculate_fan_in_and_fan_out(self.linear.weight) gain = (2.0 / (1.0 + a ** 2)) ** 0.5 self.scale = gain / fan_in ** 0.5 if reshape: c_out /= 4 * 4 self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0)) self.reshape = reshape def forward(self, x): x = self.linear(x.mul(self.scale)) if self.reshape: x = x.view(-1, 512, 4, 4) x = x + self.bias.view(1, -1, 1, 1).expand_as(x) else: x = x + self.bias.view(1, -1).expand_as(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c_in': 4, 'c_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.init import normal import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) def _calculate_fan_in_and_fan_out(tensor): dimensions = tensor.ndimension() if dimensions < 2: raise ValueError( 'Fan in and fan out can not be computed for tensor with less than 2 dimensions' ) if dimensions == 2: fan_in = tensor.size(1) fan_out = tensor.size(0) else: num_input_fmaps = tensor.size(1) num_output_fmaps = tensor.size(0) receptive_field_size = 1 if tensor.dim() > 2: receptive_field_size = tensor[0][0].numel() fan_in = num_input_fmaps * receptive_field_size fan_out = num_output_fmaps * receptive_field_size return fan_in, fan_out class equalized_linearNew(nn.Module): def __init__(self, c_in, c_out, initializer='kaiming', a=1.0, reshape=False ): super(equalized_linearNew, self).__init__() self.linear = nn.Linear(c_in, c_out, bias=False) if initializer == 'kaiming': normal(self.linear.weight) fan_in, _ = _calculate_fan_in_and_fan_out(self.linear.weight) gain = (2.0 / (1.0 + a ** 2)) ** 0.5 self.scale = gain / fan_in ** 0.5 if reshape: c_out /= 4 * 4 self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0)) self.reshape = reshape def forward(self, input_0): primals_3 = self.bias primals_2 = self.linear.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mingo-x/pggan-pytorch
equalized_linear
false
7,234
[ "MIT" ]
1
a1dde73cd4df52476fe7c948d81fa9caea8070a5
https://github.com/mingo-x/pggan-pytorch/tree/a1dde73cd4df52476fe7c948d81fa9caea8070a5
ConvBlock
import torch import torch.nn as nn class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out class ConvBlock(nn.Module): """Layer to perform a convolution followed by ELU """ def __init__(self, in_channels, out_channels): super(ConvBlock, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.ELU(inplace=True) def forward(self, x): out = self.conv(x) out = self.nonlin(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, 
num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out class ConvBlockNew(nn.Module): """Layer to perform a convolution followed by ELU """ def __init__(self, in_channels, out_channels): super(ConvBlockNew, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.ELU(inplace=True) def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
minjabenho/image2pcl
ConvBlock
false
7,235
[ "Apache-2.0" ]
1
7e696ee48edae30814d32f32e605ad6cf8bf702c
https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c
Project3D
import torch import torch.nn as nn class Project3D(nn.Module): """Layer which projects 3D points into a camera with intrinsics K and at position T """ def __init__(self, batch_size, height, width, eps=1e-07): super(Project3D, self).__init__() self.batch_size = batch_size self.height = height self.width = width self.eps = eps def forward(self, points, K, T): P = torch.matmul(K, T)[:, :3, :] cam_points = torch.matmul(P, points) pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze( 1) + self.eps) pix_coords = pix_coords.view(self.batch_size, 2, self.height, self. width) pix_coords = pix_coords.permute(0, 2, 3, 1) pix_coords[..., 0] /= self.width - 1 pix_coords[..., 1] /= self.height - 1 pix_coords = (pix_coords - 0.5) * 2 return pix_coords def get_inputs(): return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 48 x1 = xindex // 48 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex % 32 x4 = xindex tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp4 = tl.full([1], 0, tl.int32) tmp5 = tmp1 == tmp4 tmp6 = tmp4 == tmp4 tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tmp7 / tmp10 tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = tl.where(tmp6, tmp13, tmp11) tmp16 = tmp15 / tmp10 tmp17 = tl.where(tmp5, tmp13, tmp16) tmp18 = tl.where(tmp5, tmp14, tmp17) tmp19 = tmp18 * tmp12 tmp20 = tl.where(tmp3, tmp19, tmp18) tmp21 = tmp0 == tmp4 tmp23 = tmp22 / tmp10 tmp24 = tl.where(tmp21, tmp13, tmp23) tmp25 = tl.where(tmp21, tmp14, tmp24) tmp26 = tl.where(tmp2, tmp19, tmp25) 
tmp27 = tl.where(tmp2, tmp20, tmp26) tmp28 = 0.5 tmp29 = tmp27 - tmp28 tmp30 = 2.0 tmp31 = tmp29 * tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2 ) del arg2_1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32) triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, class Project3DNew(nn.Module): """Layer which projects 3D points into a camera with intrinsics K and at position T """ def __init__(self, batch_size, height, width, eps=1e-07): super(Project3DNew, self).__init__() self.batch_size = batch_size self.height = height self.width = width self.eps = eps def forward(self, input_0, input_1, input_2): arg2_1 = input_0 arg0_1 = input_1 arg1_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
minjabenho/image2pcl
Project3D
false
7,236
[ "Apache-2.0" ]
1
7e696ee48edae30814d32f32e605ad6cf8bf702c
https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c
SelfAttnLayer
import torch import torch.nn as nn import torch.nn.functional as F def get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu'): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = get_activation_fn(activation) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, attn = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src, attn class SelfAttnLayer(nn.Module): def __init__(self, d_model, nhead=4, dropout=0.1): super().__init__() self.transformer_layer = TransformerEncoderLayer(d_model, nhead, d_model * 1, dropout=dropout, activation='relu') def forward(self, k, mask=None): attn = None k = k.transpose(0, 1) x, attn = self.transformer_layer(k, src_mask=mask) x = x.transpose(0, 1) return x, attn def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mean_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = tl.load(in_out_ptr0 + (x1 + 4 * y0), xmask & ymask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x1 + 4 * y0), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2) del primals_2 buf3 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 get_raw_stream(0) triton_poi_fused_add_0[grid(16)](buf3, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0) del buf0 triton_poi_fused_mul_1[grid(16)](buf4, primals_3, 16, XBLOCK=16, num_warps=1, 
num_stages=1) buf5 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0) del buf1 triton_poi_fused_add_2[grid(16)](buf5, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (4, 1, 4), (1, 0, 4), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf7 buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf9, buf10, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf11) buf12 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mean_6[grid(16)](buf8, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused_add_native_layer_norm_7[grid(4, 4)](buf13, primals_1, primals_5, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del primals_5 buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_native_layer_norm_8[grid(4)](buf13, buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(16)](buf13, buf14, buf15, primals_6, primals_7, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (4, 4), 
(1, 4), 0), out=buf17) buf18 = buf17 del buf17 triton_poi_fused_relu_10[grid(16)](buf18, primals_9, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf18, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf19) buf20 = buf19 del buf19 triton_poi_fused_add_11[grid(16)](buf20, buf16, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_8[grid(4)](buf20, buf21, buf22, 4, XBLOCK=4, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, primals_12, primals_13, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf21 del buf22 del primals_13 return (reinterpret_tensor(buf23, (4, 4), (1, 4), 0), reinterpret_tensor(buf12, (4, 4), (4, 1), 0), primals_6, primals_12, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf8, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf13, buf16, buf18, buf20, primals_10, primals_8, primals_4, reinterpret_tensor(buf3, ( 4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0)) def get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu'): super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) 
self.activation = get_activation_fn(activation) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2, attn = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask) src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src, attn class SelfAttnLayerNew(nn.Module): def __init__(self, d_model, nhead=4, dropout=0.1): super().__init__() self.transformer_layer = TransformerEncoderLayer(d_model, nhead, d_model * 1, dropout=dropout, activation='relu') def forward(self, input_0): primals_2 = self.transformer_layer.self_attn.in_proj_weight primals_3 = self.transformer_layer.self_attn.in_proj_bias primals_1 = self.transformer_layer.self_attn.out_proj.weight primals_5 = self.transformer_layer.self_attn.out_proj.bias primals_4 = self.transformer_layer.linear1.weight primals_6 = self.transformer_layer.linear1.bias primals_8 = self.transformer_layer.linear2.weight primals_7 = self.transformer_layer.linear2.bias primals_9 = self.transformer_layer.norm1.weight primals_11 = self.transformer_layer.norm1.bias primals_12 = self.transformer_layer.norm2.weight primals_13 = self.transformer_layer.norm2.bias primals_10 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
mensudza/C-Tran
SelfAttnLayer
false
7,237
[ "MIT" ]
1
4895ccb0e675ae2dcd2b619a9e47f30707062668
https://github.com/mensudza/C-Tran/tree/4895ccb0e675ae2dcd2b619a9e47f30707062668
depthwise_separable_conv
import torch import torch.nn as nn class depthwise_separable_conv(torch.nn.Module): def __init__(self, nin, nout, kernel_size, padding): super(depthwise_separable_conv, self).__init__() self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size, padding=padding, groups=nin) self.pointwise = nn.Conv2d(nin, nout, kernel_size=1) def forward(self, x): out = self.depthwise(x) out = self.pointwise(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nin': 4, 'nout': 4, 'kernel_size': 4, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(1296)](buf1, primals_2, 1296, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(1296)](buf3, primals_5, 1296, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class depthwise_separable_convNew(torch.nn.Module): def 
__init__(self, nin, nout, kernel_size, padding): super(depthwise_separable_convNew, self).__init__() self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size, padding=padding, groups=nin) self.pointwise = nn.Conv2d(nin, nout, kernel_size=1) def forward(self, input_0): primals_1 = self.depthwise.weight primals_2 = self.depthwise.bias primals_4 = self.pointwise.weight primals_5 = self.pointwise.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mirayyuce/Neural-Architecture-Search
depthwise_separable_conv
false
7,238
[ "BSD-3-Clause" ]
1
e294816c85200f4301376c8b355634c6cca81816
https://github.com/mirayyuce/Neural-Architecture-Search/tree/e294816c85200f4301376c8b355634c6cca81816
BertPredictionHeadTransform
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() """ Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = gelu self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) def forward(self, hidden_states): """(N, L, D)""" hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp9 * tmp3 tmp12 = libdevice.erf(tmp11) tmp13 = tmp12 + tmp6 tmp14 = tmp10 * tmp13 tmp15 = tmp8 + tmp14 tmp17 = tmp16 * tmp1 tmp18 = tmp16 * tmp3 tmp19 = libdevice.erf(tmp18) tmp20 = tmp19 + tmp6 tmp21 = tmp17 * tmp20 tmp22 = tmp15 + tmp21 tmp24 = tmp23 * tmp1 tmp25 = tmp23 * tmp3 tmp26 = libdevice.erf(tmp25) tmp27 = tmp26 + tmp6 tmp28 = tmp24 * tmp27 tmp29 = tmp22 + tmp28 tmp30 = 4.0 tmp31 = tmp29 / tmp30 tmp32 = tmp8 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp14 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp21 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp28 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / tmp30 tl.store(out_ptr0 + x0, tmp31, xmask) tl.store(out_ptr1 + x0, 
tmp43, xmask) @triton.jit def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865475 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tmp11 = tmp9 - tmp10 tmp13 = tmp12 + tmp7 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp11 / tmp14 tmp16 = tmp0 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4, buf0, 
buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del primals_5 return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() """ Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertPredictionHeadTransformNew(nn.Module): def __init__(self, config): super(BertPredictionHeadTransformNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = gelu self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) def forward(self, input_0): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_4 = self.LayerNorm.weight primals_5 = self.LayerNorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
minjoong507/Image-Captioning-Transformer
BertPredictionHeadTransform
false
7,239
[ "MIT" ]
1
813060f0bb656e336154173f11e99a80362c8c2a
https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a
Router
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class Squash(Module): '\n ## Squash\n\n This is **squashing** function from paper, given by equation $(1)$.\n\n $$\\mathbf{v}_j = \x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}\n \x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$$\n\n $\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$\n normalizes the length of all the capsules, whilst\n $\x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}$\n shrinks the capsules that have a length smaller than one .\n ' def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, s: 'torch.Tensor'): """ The shape of `s` is `[batch_size, n_capsules, n_features]` """ s2 = (s ** 2).sum(dim=-1, keepdims=True) return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon)) class Router(Module): """ ## Routing Algorithm This is the routing mechanism described in the paper. You can use multiple routing layers in your models. This combines calculating $\\mathbf{s}_j$ for this layer and the routing algorithm described in *Procedure 1*. """ def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d: 'int', iterations: 'int'): """ `in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below. `out_caps` and `out_d` are the same for this layer. `iterations` is the number of routing iterations, symbolized by $r$ in the paper. """ super().__init__() self.in_caps = in_caps self.out_caps = out_caps self.iterations = iterations self.softmax = nn.Softmax(dim=1) self.squash = Squash() self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d, out_d), requires_grad=True) def forward(self, u: 'torch.Tensor'): """ The shape of `u` is `[batch_size, n_capsules, n_features]`. These are the capsules from the lower layer. 
""" u_hat = torch.einsum('ijnm,bin->bijm', self.weight, u) b = u.new_zeros(u.shape[0], self.in_caps, self.out_caps) v = None for i in range(self.iterations): c = self.softmax(b) s = torch.einsum('bij,bijm->bjm', c, u_hat) v = self.squash(s) a = torch.einsum('bjm,bijm->bij', v, u_hat) b = b + a return v def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_caps': 4, 'out_caps': 4, 'in_d': 4, 'out_d': 4, 'iterations': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tmp1 = tl_math.exp(tmp0) tmp2 = tmp1 + tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp1 / tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex % 4 x2 = xindex // 4 % 4 x3 = xindex // 16 y0 = yindex x4 = xindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1 + 16 * x3 + 64 * x2), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4 + 64 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex % 4 x2 = xindex // 4 y0 = yindex x3 = xindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * x1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3 + 64 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * 
x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (x0 // 4) + x0 % 4), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_add_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x0, tmp14, xmask) 
tl.store(out_ptr1 + x0, tmp25, xmask) @triton.jit def triton_poi_fused__softmax_add_clone_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_add_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + 
tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 - tmp22 tmp24 = tl_math.exp(tmp23) tmp25 = tmp9 - tmp22 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp15 - tmp22 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp21 - tmp22 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tl.store(out_ptr0 + x0, tmp22, xmask) tl.store(out_ptr1 + x0, tmp33, xmask) @triton.jit def triton_poi_fused__softmax_add_clone_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_transpose_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 
4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_2[grid(4, 64)](buf1, buf3, 4, 64, XBLOCK=32, YBLOCK=4, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(4, 64)](buf1, buf6, 4, 64, XBLOCK=32, YBLOCK=4, num_warps=4, num_stages=1) del buf1 buf7 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused__softmax_5[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_6[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK= 4, YBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 1, 4), (1, 64, 16), 0) del buf8 triton_poi_fused_bmm_7[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((16, 
1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(buf10, reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf11) buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf12, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_poi_fused__softmax_add_8[grid(16)](buf7, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused__softmax_add_clone_9[grid(64)](buf7, buf13, buf14, buf15, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf19, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf20) buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused__softmax_add_10[grid(16)](buf7, buf13, buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0) del buf13 buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused__softmax_add_clone_11[grid(64)](buf23, buf7, buf20, buf21, buf22, buf24, 64, 
XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 buf25 = buf20 del buf20 extern_kernels.bmm(reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf25) buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((16, 4, 1), (4, 1, 4), torch.float32) triton_poi_fused_transpose_12[grid(16, 4)](buf9, buf27, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf9 return (buf26, buf4, buf7, buf11, buf16, buf18, buf23, buf25, reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 4), 0), buf27, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0)) class Squash(Module): '\n ## Squash\n\n This is **squashing** function from paper, given by equation $(1)$.\n\n $$\\mathbf{v}_j = \x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}\n \x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$$\n\n $\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$\n normalizes the length of all the capsules, whilst\n $\x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}$\n shrinks the capsules that have a length smaller than one .\n ' def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, s: 'torch.Tensor'): """ The shape of `s` is `[batch_size, n_capsules, n_features]` """ s2 = (s ** 2).sum(dim=-1, keepdims=True) return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon)) class RouterNew(Module): """ ## Routing Algorithm This is the 
routing mechanism described in the paper. You can use multiple routing layers in your models. This combines calculating $\\mathbf{s}_j$ for this layer and the routing algorithm described in *Procedure 1*. """ def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d: 'int', iterations: 'int'): """ `in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below. `out_caps` and `out_d` are the same for this layer. `iterations` is the number of routing iterations, symbolized by $r$ in the paper. """ super().__init__() self.in_caps = in_caps self.out_caps = out_caps self.iterations = iterations self.softmax = nn.Softmax(dim=1) self.squash = Squash() self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d, out_d), requires_grad=True) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
mcx/annotated_deep_learning_paper_implementations
Router
false
7,240
[ "MIT" ]
1
f169f3a71dd2d36eb28ad31062d3475efa367b88
https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
Pointer
import torch import torch.nn as nn def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class Initialized_Conv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, relu=False, bias=False): super().__init__() self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride =stride, padding=padding, groups=groups, bias=bias) if relu is True: self.relu = True nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu') else: self.relu = False nn.init.xavier_uniform_(self.out.weight) def forward(self, x): if self.relu is True: return nn.functional.relu(self.out(x)) else: return self.out(x) class Pointer(nn.Module): def __init__(self, d_model): super().__init__() self.w1 = Initialized_Conv1d(d_model * 2, 1) self.w2 = Initialized_Conv1d(d_model * 2, 1) def forward(self, M1, M2, M3, mask): X1 = torch.cat([M1, M2], dim=1) X2 = torch.cat([M1, M3], dim=1) Y1 = mask_logits(self.w1(X1).squeeze(), mask) Y2 = mask_logits(self.w2(X2).squeeze(), mask) return Y1, Y2 def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x3, tmp10, xmask) tl.store(out_ptr1 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp1 tmp5 = -1e+30 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp8 * tmp1 tmp10 = tmp9 + tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 
4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, primals_3, buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4), (4, 4, 1)) buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 4), (4, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_1[grid(64)](buf2, primals_5, buf4, buf3, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del buf4 return buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1 def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class Initialized_Conv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, relu=False, bias=False): super().__init__() self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride =stride, padding=padding, groups=groups, bias=bias) if relu is True: self.relu = True nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu') else: self.relu = False nn.init.xavier_uniform_(self.out.weight) def forward(self, x): if self.relu is True: return 
nn.functional.relu(self.out(x)) else: return self.out(x) class PointerNew(nn.Module): def __init__(self, d_model): super().__init__() self.w1 = Initialized_Conv1d(d_model * 2, 1) self.w2 = Initialized_Conv1d(d_model * 2, 1) def forward(self, input_0, input_1, input_2, input_3): primals_4 = self.w1.out.weight primals_6 = self.w2.out.weight primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_5 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
mirbostani/QA-KD-AL
Pointer
false
7,241
[ "MIT" ]
1
0ec8756ee06ae2a204a5e9110503bc697e9108fb
https://github.com/mirbostani/QA-KD-AL/tree/0ec8756ee06ae2a204a5e9110503bc697e9108fb
SSIM
import torch import torch.nn as nn class SSIM(nn.Module): """Layer to compute the SSIM loss between a pair of images """ def __init__(self): super(SSIM, self).__init__() self.mu_x_pool = nn.AvgPool2d(3, 1) self.mu_y_pool = nn.AvgPool2d(3, 1) self.sig_x_pool = nn.AvgPool2d(3, 1) self.sig_y_pool = nn.AvgPool2d(3, 1) self.sig_xy_pool = nn.AvgPool2d(3, 1) self.refl = nn.ReflectionPad2d(1) self.C1 = 0.01 ** 2 self.C2 = 0.03 ** 2 def forward(self, x, y): x = self.refl(x) y = self.refl(y) mu_x = self.mu_x_pool(x) mu_y = self.mu_y_pool(y) sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2 sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2 sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2) SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2) return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = 
tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp66 = 
tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tmp21 = tmp20 + tmp19 tmp23 = tmp22 + tmp21 tmp25 = tmp24 + tmp23 tmp27 = tmp26 + tmp25 tmp29 = tmp28 + tmp27 tmp31 = tmp30 + tmp29 tmp33 = tmp32 + tmp31 tmp35 = tmp34 + tmp33 tmp36 = tmp35 * tmp17 tmp37 = tmp19 * tmp19 tmp38 = tmp20 * tmp20 tmp39 = tmp38 + tmp37 tmp40 = tmp22 * tmp22 tmp41 = tmp40 + tmp39 tmp42 = tmp24 * tmp24 tmp43 = tmp42 + tmp41 tmp44 = tmp26 * tmp26 tmp45 = tmp44 + tmp43 tmp46 = tmp28 * tmp28 tmp47 = tmp46 + tmp45 tmp48 = tmp30 * tmp30 tmp49 = tmp48 + tmp47 tmp50 = tmp32 * tmp32 tmp51 = tmp50 + tmp49 tmp52 = tmp34 * tmp34 tmp53 = tmp52 + tmp51 tmp54 = tmp53 * tmp17 tmp57 = tmp56 + tmp55 tmp59 = tmp58 + tmp57 tmp61 = tmp60 + tmp59 tmp63 = tmp62 + tmp61 tmp65 = tmp64 + tmp63 tmp67 = tmp66 + tmp65 tmp69 = tmp68 + tmp67 tmp71 = tmp70 + tmp69 tmp72 = tmp71 * tmp17 tmp73 = tmp55 * tmp55 tmp74 = tmp56 * tmp56 tmp75 = tmp74 + tmp73 tmp76 = tmp58 * tmp58 tmp77 = tmp76 + tmp75 tmp78 = tmp60 * tmp60 tmp79 = tmp78 + tmp77 tmp80 = tmp62 * tmp62 tmp81 = tmp80 + tmp79 tmp82 = tmp64 * tmp64 tmp83 = tmp82 + tmp81 tmp84 = tmp66 * tmp66 tmp85 = tmp84 + tmp83 tmp86 = tmp68 * tmp68 tmp87 = tmp86 + tmp85 tmp88 = tmp70 * tmp70 tmp89 = tmp88 + tmp87 tmp90 = tmp89 * tmp17 tmp91 = 2.0 tmp92 = tmp36 * tmp91 tmp93 = tmp92 * tmp72 tmp94 = 0.0001 tmp95 = tmp93 + tmp94 tmp96 = tmp36 * tmp72 tmp97 = tmp18 - tmp96 tmp98 = tmp97 * tmp91 tmp99 = 0.0009 tmp100 = tmp98 + tmp99 tmp101 = tmp95 * tmp100 tmp102 = tmp36 * tmp36 
tmp103 = tmp72 * tmp72 tmp104 = tmp102 + tmp103 tmp105 = tmp104 + tmp94 tmp106 = tmp54 - tmp102 tmp107 = tmp90 - tmp103 tmp108 = tmp106 + tmp107 tmp109 = tmp108 + tmp99 tmp110 = tmp105 * tmp109 tmp111 = tmp101 / tmp110 tmp112 = 1.0 tmp113 = tmp112 - tmp111 tmp114 = 0.5 tmp115 = tmp113 * tmp114 tmp116 = 0.0 tmp117 = triton_helpers.maximum(tmp115, tmp116) tmp118 = triton_helpers.minimum(tmp117, tmp112) tl.store(in_out_ptr0 + x3, tmp118, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf0 del buf0 buf7 = buf6 del buf6 triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[ grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf2 return buf7, class SSIMNew(nn.Module): """Layer to compute the SSIM loss between a pair of images """ def __init__(self): super(SSIMNew, self).__init__() self.mu_x_pool = nn.AvgPool2d(3, 1) self.mu_y_pool = nn.AvgPool2d(3, 1) self.sig_x_pool = nn.AvgPool2d(3, 1) self.sig_y_pool = nn.AvgPool2d(3, 1) self.sig_xy_pool = nn.AvgPool2d(3, 1) self.refl = nn.ReflectionPad2d(1) self.C1 = 0.01 ** 2 self.C2 = 0.03 ** 2 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
minjabenho/image2pcl
SSIM
false
7,242
[ "Apache-2.0" ]
1
7e696ee48edae30814d32f32e605ad6cf8bf702c
https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c
dream_loss
import torch class dream_loss(torch.nn.Module): def __init__(self): super(dream_loss, self).__init__() def forward(self, yhat, y): diff = torch.sum(yhat - y) return diff def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class dream_lossNew(torch.nn.Module): def __init__(self): super(dream_lossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mkelcb/knet
dream_loss
false
7,244
[ "MIT" ]
1
f0e75f526c8bcdc6969052328b2b1b9cd6767cd8
https://github.com/mkelcb/knet/tree/f0e75f526c8bcdc6969052328b2b1b9cd6767cd8
BertSelfAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, 
eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float('-inf') tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = tmp39 != 0 tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = tmp48 != 0 tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = tmp53 != 0 tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + x3, tmp24, xmask) tl.store(out_ptr1 + x3, tmp35, xmask) tl.store(out_ptr2 + x3, tmp55, xmask) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x3 = xindex // 64 x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x5, xmask) tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp12 = 
tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + x5, tmp15, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2) del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = 
reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf11 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): 
super(BertSelfAttentionNew, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.query.weight primals_3 = self.query.bias primals_5 = self.key.weight primals_6 = self.key.bias primals_8 = self.value.weight primals_9 = self.value.bias primals_1 = input_0 primals_4 = input_1 primals_7 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
minjoong507/Image-Captioning-Transformer
BertSelfAttention
false
7,247
[ "MIT" ]
1
813060f0bb656e336154173f11e99a80362c8c2a
https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a
BertLMPredictionHead
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() """ Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = gelu self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. 
layer_norm_eps) def forward(self, hidden_states): """(N, L, D)""" hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.softmax = nn.Softmax(dim=1) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias hidden_states = self.softmax(hidden_states) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, vocab_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp9 * tmp3 tmp12 = libdevice.erf(tmp11) tmp13 = tmp12 + tmp6 tmp14 = tmp10 * tmp13 tmp15 = tmp8 + tmp14 tmp17 = tmp16 * tmp1 tmp18 = tmp16 * tmp3 tmp19 = libdevice.erf(tmp18) tmp20 = tmp19 + tmp6 tmp21 = tmp17 * tmp20 tmp22 = tmp15 + tmp21 tmp24 = tmp23 * tmp1 tmp25 = tmp23 * tmp3 tmp26 = libdevice.erf(tmp25) tmp27 = tmp26 + tmp6 tmp28 = tmp24 * tmp27 tmp29 = tmp22 + tmp28 tmp30 = 4.0 tmp31 = tmp29 / tmp30 tmp32 = tmp8 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp14 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp21 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp28 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / 
tmp30 tl.store(out_ptr0 + x0, tmp31, xmask) tl.store(out_ptr1 + x0, tmp43, xmask) @triton.jit def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865475 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tmp11 = tmp9 - tmp10 tmp13 = tmp12 + tmp7 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp11 / tmp14 tmp16 = tmp0 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp6 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp9 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - 
tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x4, tmp11, xmask) tl.store(out_ptr1 + x4, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = xindex // 64 x5 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr2 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(in_out_ptr0 + x4, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4, buf0, buf1, buf2, primals_5, 
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 64, 4, 1), 0) del buf2 buf6 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 64, 4, 1), 0) del buf1 triton_poi_fused__softmax_add_2[grid(64)](buf4, primals_7, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_add_3[grid(256)](buf7, primals_7, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 del primals_7 return buf7, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf7, primals_6 def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() """ Construct a layernorm module in the TF style (epsilon inside the square root). 
""" super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = gelu self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) def forward(self, hidden_states): """(N, L, D)""" hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHeadNew(nn.Module): def __init__(self, config): super(BertLMPredictionHeadNew, self).__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_2 = self.bias primals_1 = self.transform.dense.weight primals_4 = self.transform.dense.bias primals_5 = self.transform.LayerNorm.weight primals_7 = self.transform.LayerNorm.bias primals_6 = self.decoder.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
minjoong507/Image-Captioning-Transformer
BertLMPredictionHead
false
7,248
[ "MIT" ]
1
813060f0bb656e336154173f11e99a80362c8c2a
https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a
CAT_TokenEmbedding
import torch import torch.nn as nn class CAT_TokenEmbedding(nn.Module): def __init__(self, c_in=1, d_feature=10): super(CAT_TokenEmbedding, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_feature, kernel_size=3, padding=padding, padding_mode='circular') for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, x: 'torch.Tensor'): x = x.unsqueeze(1) x = x.transpose(0, 2) x = self.tokenConv(x).permute(1, 2, 0) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 6 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex x1 = xindex tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x1 + 4 * y0), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x1 + 4 * y0), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x1 + 4 * y0), tmp31 & xmask & ymask, 
eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x1 + 4 * y0), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (10, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(6, 4)](primals_1, buf1, 6, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 10, 4), (40, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(160)](buf3, primals_3, 160, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (10, 4, 4), (4, 1, 40), 0), primals_2, buf1 class CAT_TokenEmbeddingNew(nn.Module): def __init__(self, c_in=1, d_feature=10): super(CAT_TokenEmbeddingNew, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_feature, kernel_size=3, padding=padding, padding_mode='circular') for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, input_0): primals_2 = self.tokenConv.weight primals_3 = self.tokenConv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mkmysk123456789/Informer2020
CAT_TokenEmbedding
false
7,250
[ "Apache-2.0" ]
1
ad4b895169a17db580aab6d2c09fd07e06c9b6fa
https://github.com/mkmysk123456789/Informer2020/tree/ad4b895169a17db580aab6d2c09fd07e06c9b6fa
BertAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() """ Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, input_tensor, input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': 
_mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, dropout=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * 
x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float('-inf') tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = tmp39 != 0 tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = tmp48 != 0 tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = tmp53 != 0 tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + x3, tmp24, xmask) tl.store(out_ptr1 + x3, tmp35, xmask) tl.store(out_ptr2 + x3, tmp55, xmask) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x3 = xindex // 64 x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x5, xmask) tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + x4, xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + x5, tmp15, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): 
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 
0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 
0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_4, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11, buf13, primals_4, buf14, buf15, primals_12, buf16, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_12 return buf16, primals_4, primals_11, buf9, reinterpret_tensor(buf10, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9 class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. 
attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() """ Construct a layernorm module in the TF style (epsilon inside the square root). 
""" super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_2 = self.self.query.weight primals_3 = self.self.query.bias primals_5 = self.self.key.weight primals_6 = self.self.key.bias primals_7 = self.self.value.weight primals_8 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
minjoong507/Image-Captioning-Transformer
BertAttention
false
7,252
[ "MIT" ]
1
813060f0bb656e336154173f11e99a80362c8c2a
https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a
BoundSoftmaxImpl
import torch import torch.nn as nn class BoundSoftmaxImpl(nn.Module): def __init__(self, axis): super().__init__() self.axis = axis def forward(self, x): max_x = torch.max(x, dim=self.axis).values assert self.axis == int(self.axis) x = torch.exp(x - max_x.unsqueeze(self.axis)) s = torch.sum(x, dim=self.axis, keepdim=True) return x / s def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'axis': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 
4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_sub_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_div_sum_1[grid(1024)](buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return buf1, class BoundSoftmaxImplNew(nn.Module): def __init__(self, axis): super().__init__() self.axis = axis def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mnmueller/auto_LiRPA
BoundSoftmaxImpl
false
7,253
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
CAT_TemporalEmbedding
import math import torch import torch.nn as nn class CAT_FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(CAT_FixedEmbedding, self).__init__() w = torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class CAT_TemporalEmbedding(nn.Module): def __init__(self, d_feature=10, embed_type='fixed', freq='h'): super(CAT_TemporalEmbedding, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = CAT_FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_feature) self.hour_embed = Embed(hour_size, d_feature) self.weekday_embed = Embed(weekday_size, d_feature) self.day_embed = Embed(day_size, d_feature) self.month_embed = Embed(month_size, d_feature) def forward(self, x): x = x.long() minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.0 hour_x = self.hour_embed(x[:, :, 3]) weekday_x = self.weekday_embed(x[:, :, 2]) day_x = self.day_embed(x[:, :, 1]) month_x = self.month_embed(x[:, :, 0]) temporal_embed = hour_x + weekday_x + day_x + month_x + minute_x temporal_embed = temporal_embed.permute(2, 0, 1) return temporal_embed def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 10 x0 = xindex % 10 x2 = xindex tmp0 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 24, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 24) | ~xmask, 'index out of bounds: 0 <= tmp5 < 24') tmp7 = tl.load(in_ptr1 + (x0 + 10 * tmp5), xmask) tmp9 = tmp8.to(tl.int64) tmp10 = tl.full([XBLOCK], 7, tl.int32) tmp11 = tmp9 + tmp10 tmp12 = tmp9 < 0 tmp13 = tl.where(tmp12, tmp11, tmp9) tl.device_assert((0 <= tmp13) & (tmp13 < 7) | ~xmask, 'index out of bounds: 0 <= tmp13 < 7') tmp15 = tl.load(in_ptr2 + (x0 + 10 * tmp13), xmask) tmp16 = tmp7 + tmp15 tmp18 = tmp17.to(tl.int64) tmp19 = tl.full([XBLOCK], 32, tl.int32) tmp20 = tmp18 + tmp19 tmp21 = tmp18 < 0 tmp22 = tl.where(tmp21, tmp20, tmp18) tl.device_assert((0 <= tmp22) & (tmp22 < 32) | ~xmask, 'index out of bounds: 0 <= tmp22 < 32') tmp24 = tl.load(in_ptr3 + (x0 + 10 * tmp22), xmask) tmp25 = tmp16 + tmp24 tmp27 = tmp26.to(tl.int64) tmp28 = tl.full([XBLOCK], 13, tl.int32) tmp29 = tmp27 + tmp28 tmp30 = 
tmp27 < 0 tmp31 = tl.where(tmp30, tmp29, tmp27) tl.device_assert((0 <= tmp31) & (tmp31 < 13) | ~xmask, 'index out of bounds: 0 <= tmp31 < 13') tmp33 = tl.load(in_ptr4 + (x0 + 10 * tmp31), xmask) tmp34 = tmp25 + tmp33 tmp35 = 0.0 tmp36 = tmp34 + tmp35 tl.store(out_ptr0 + x2, tmp36, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (24, 10), (10, 1)) assert_size_stride(arg2_1, (7, 10), (10, 1)) assert_size_stride(arg3_1, (32, 10), (10, 1)) assert_size_stride(arg4_1, (13, 10), (10, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_embedding_0[grid(160)](arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 160, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return reinterpret_tensor(buf0, (10, 4, 4), (1, 40, 10), 0), class CAT_FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(CAT_FixedEmbedding, self).__init__() w = torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class CAT_TemporalEmbeddingNew(nn.Module): def __init__(self, d_feature=10, embed_type='fixed', freq='h'): super(CAT_TemporalEmbeddingNew, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = CAT_FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_feature) self.hour_embed = Embed(hour_size, d_feature) self.weekday_embed = Embed(weekday_size, 
d_feature) self.day_embed = Embed(day_size, d_feature) self.month_embed = Embed(month_size, d_feature) def forward(self, input_0): arg1_1 = self.hour_embed.emb.weight arg2_1 = self.weekday_embed.emb.weight arg3_1 = self.day_embed.emb.weight arg4_1 = self.month_embed.emb.weight arg0_1 = input_0 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
mkmysk123456789/Informer2020
CAT_TemporalEmbedding
false
7,254
[ "Apache-2.0" ]
1
ad4b895169a17db580aab6d2c09fd07e06c9b6fa
https://github.com/mkmysk123456789/Informer2020/tree/ad4b895169a17db580aab6d2c09fd07e06c9b6fa
CQAttention
import torch import torch.nn as nn import torch.nn.functional as F def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class CQAttention(nn.Module): def __init__(self, d_model, dropout=0.1): super().__init__() w4C = torch.empty(d_model, 1) w4Q = torch.empty(d_model, 1) w4mlu = torch.empty(1, 1, d_model) nn.init.xavier_uniform_(w4C) nn.init.xavier_uniform_(w4Q) nn.init.xavier_uniform_(w4mlu) self.w4C = nn.Parameter(w4C) self.w4Q = nn.Parameter(w4Q) self.w4mlu = nn.Parameter(w4mlu) bias = torch.empty(1) nn.init.constant_(bias, 0) self.bias = nn.Parameter(bias) self.dropout = dropout def forward(self, C, Q, Cmask, Qmask): C = C.transpose(1, 2) Q = Q.transpose(1, 2) batch_size_c = C.size()[0] _batch_size, Lc, _d_model = C.shape _batch_size, Lq, _d_model = Q.shape S = self.trilinear_for_attention(C, Q) Cmask = Cmask.view(batch_size_c, Lc, 1) Qmask = Qmask.view(batch_size_c, 1, Lq) S1 = F.softmax(mask_logits(S, Qmask), dim=2) S2 = F.softmax(mask_logits(S, Cmask), dim=1) A = torch.bmm(S1, Q) B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C) out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2) return out.transpose(1, 2) def trilinear_for_attention(self, C, Q): _batch_size, Lc, _d_model = C.shape _batch_size, Lq, _d_model = Q.shape dropout = self.dropout C = F.dropout(C, p=dropout, training=self.training) Q = F.dropout(Q, p=dropout, training=self.training) subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq]) subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1] ) subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2)) res = subres0 + subres1 + subres2 res += self.bias return res def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x4, xmask) tmp5 = tl.load(in_ptr3 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr4 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp4 + tmp6 tmp9 = tmp7 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp8 tmp12 = -1e+30 tmp13 = tmp11 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp7 * tmp15 tmp17 = tmp10 - tmp15 tmp18 = tmp17 * tmp12 tmp19 = tmp16 + tmp18 tl.store(out_ptr0 + x4, tmp14, xmask) tl.store(out_ptr1 + x4, tmp19, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = 
triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * 
x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex // 16 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x1 + 4 * (-8 + x0) + 16 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x3 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (x1 + 4 * (-12 + x0) + 16 * x2), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr2 + (4 * x3 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = 
tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_2, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf2, primals_4, out=buf3) del primals_4 buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_mul_1[grid(64)](primals_1, primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, primals_2, out=buf5) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_2[grid(64)](buf1, buf3, buf5, primals_6, primals_8, primals_7, buf6, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del buf3 del primals_6 buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf6 del buf6 
triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf7 del buf7 triton_poi_fused__softmax_5[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused__softmax_6[grid(64)](buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = buf10 del buf10 extern_kernels.bmm(buf8, reinterpret_tensor(primals_2, (4, 4, 4), ( 16, 1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf14) del buf13 buf15 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_7[grid(256)](primals_1, buf12, buf14, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del buf14 return reinterpret_tensor(buf15, (4, 16, 4), (64, 1, 16), 0 ), primals_7, primals_8, reinterpret_tensor(primals_1, (4, 4, 4), ( 16, 1, 4), 0), primals_2, buf8, buf11, reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0) def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class CQAttentionNew(nn.Module): def __init__(self, d_model, dropout=0.1): super().__init__() w4C = torch.empty(d_model, 1) w4Q = torch.empty(d_model, 1) w4mlu = torch.empty(1, 1, d_model) nn.init.xavier_uniform_(w4C) nn.init.xavier_uniform_(w4Q) nn.init.xavier_uniform_(w4mlu) self.w4C = nn.Parameter(w4C) self.w4Q = nn.Parameter(w4Q) self.w4mlu = nn.Parameter(w4mlu) bias = torch.empty(1) nn.init.constant_(bias, 0) self.bias = nn.Parameter(bias) self.dropout = dropout def trilinear_for_attention(self, C, Q): _batch_size, Lc, _d_model = C.shape _batch_size, Lq, _d_model = Q.shape dropout = self.dropout C = F.dropout(C, p=dropout, training=self.training) Q = 
F.dropout(Q, p=dropout, training=self.training) subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq]) subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1] ) subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2)) res = subres0 + subres1 + subres2 res += self.bias return res def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.w4C primals_4 = self.w4Q primals_5 = self.w4mlu primals_6 = self.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
mirbostani/QA-KD-AL
CQAttention
false
7,255
[ "MIT" ]
1
0ec8756ee06ae2a204a5e9110503bc697e9108fb
https://github.com/mirbostani/QA-KD-AL/tree/0ec8756ee06ae2a204a5e9110503bc697e9108fb
Transition
import torch import torch.nn as nn import torch.nn.functional as F class Transition(nn.Module): def __init__(self, in_planes, out_planes): super(Transition, self).__init__() self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True) def forward(self, x): out = self.conv(F.relu(x)) out = F.avg_pool2d(out, 2) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'out_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_avg_pool2d_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, primals_2, buf0, buf2 class TransitionNew(nn.Module): def __init__(self, in_planes, out_planes): super(TransitionNew, self).__init__() self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, 
primals_3]) return output[0]
mnmueller/auto_LiRPA
Transition
false
7,256
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
mlp_2layer
import torch import torch.nn as nn import torch.nn.functional as F class mlp_2layer(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_2layer, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 10) def forward(self, x): x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 64), (64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (10, 256), (256, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 
0 ), buf1, primals_4 class mlp_2layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_2layerNew, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mnmueller/auto_LiRPA
mlp_2layer
false
7,257
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
BertLayerNormNoVar
import torch import torch.nn as nn class BertLayerNormNoVar(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNoVar, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) x = x - u return self.weight * x + self.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class BertLayerNormNoVarNew(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNoVarNew, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) 
self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mnmueller/auto_LiRPA
BertLayerNormNoVar
false
7,258
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
mlp_5layer
import torch import torch.nn as nn import torch.nn.functional as F class mlp_5layer(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_5layer, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 256 * width) self.fc3 = nn.Linear(256 * width, 256 * width) self.fc4 = nn.Linear(256 * width, 128 * width) self.fc5 = nn.Linear(128 * width, 10) def forward(self, x): x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = F.relu(self.fc4(x)) x = self.fc5(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 64), (64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256), (256, 1)) assert_size_stride(primals_7, (256,), (1,)) 
assert_size_stride(primals_8, (128, 256), (256, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (10, 128), (128, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 256), ( 1, 256), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(1024)](buf3, primals_5, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 256), ( 1, 256), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_0[grid(1024)](buf5, primals_7, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (256, 128), ( 1, 256), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_1[grid(512)](buf7, primals_9, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf7, reinterpret_tensor( primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8) del primals_11 return buf8, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4 class mlp_5layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_5layerNew, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * 
width, 256 * width) self.fc3 = nn.Linear(256 * width, 256 * width) self.fc4 = nn.Linear(256 * width, 128 * width) self.fc5 = nn.Linear(128 * width, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_10 = self.fc5.weight primals_11 = self.fc5.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
mnmueller/auto_LiRPA
mlp_5layer
false
7,259
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
mlp_3layer
import torch import torch.nn as nn import torch.nn.functional as F class mlp_3layer(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_3layer, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 128 * width) self.fc3 = nn.Linear(128 * width, 10) def forward(self, x): x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 64), (64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (10, 128), (128, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 128), ( 1, 256), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(512)](buf3, primals_5, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, buf3, primals_6, primals_4 class mlp_3layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_3layerNew, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 128 * width) self.fc3 = nn.Linear(128 * width, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mnmueller/auto_LiRPA
mlp_3layer
false
7,261
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
AdaptiveInstanceNorm
import torch import torch.nn as nn from math import sqrt def equal_lr(module, name='weight'): EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input): return self.linear(input) class AdaptiveInstanceNorm(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) self.style.linear.bias.data[:in_channel] = 1 self.style.linear.bias.data[in_channel:] = 0 def forward(self, input, style): style = self.style(style).unsqueeze(2).unsqueeze(3) gamma, beta = style.chunk(2, 1) out = self.norm(input) out = gamma * out + beta return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.7071067811865476 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp24 = tmp22 + tmp23 tmp25 = tmp0 - tmp10 tmp26 = tmp25 * tmp21 tmp27 = tmp24 * tmp26 tmp30 = tmp28 + tmp29 tmp31 = tmp27 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del primals_2 return buf6, buf0, primals_3, primals_4, buf2, buf5 def equal_lr(module, name='weight'): EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def 
apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input): return self.linear(input) class AdaptiveInstanceNormNew(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) self.style.linear.bias.data[:in_channel] = 1 self.style.linear.bias.data[in_channel:] = 0 def forward(self, input_0, input_1): primals_2 = self.style.linear.bias primals_1 = self.style.linear.weight_orig primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
mmhnoaccount/DeepChroma_128
AdaptiveInstanceNorm
false
7,262
[ "MIT" ]
1
337ec961bfc4ee44f48cb84e624c293ee2805b62
https://github.com/mmhnoaccount/DeepChroma_128/tree/337ec961bfc4ee44f48cb84e624c293ee2805b62
cnn_4layer
import torch import torch.nn as nn import torch.nn.functional as F class cnn_4layer(nn.Module): def __init__(self, in_ch, in_dim, width=2, linear_size=256): super(cnn_4layer, self).__init__() self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1) self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1) self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size) self.fc2 = nn.Linear(linear_size, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (256, 16), (16, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (10, 256), (256, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1)) buf3 = reinterpret_tensor(buf2, (4, 16, 1, 1), (16, 1, 64, 64), 0) del buf2 buf7 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf3, primals_5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(1024)](buf5, primals_7, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32) 
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_9 return buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3 , (4, 16), (16, 1), 0), buf5, primals_8, primals_6, buf7 class cnn_4layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=2, linear_size=256): super(cnn_4layerNew, self).__init__() self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1) self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1) self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size) self.fc2 = nn.Linear(linear_size, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
mnmueller/auto_LiRPA
cnn_4layer
false
7,263
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
cnn_4layer_LeakyRelu
import torch import torch.nn as nn import torch.nn.functional as F class cnn_4layer_LeakyRelu(nn.Module): def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1): super(cnn_4layer_LeakyRelu, self).__init__() self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1) self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1) self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size) self.fc2 = nn.Linear(linear_size, 10) self.alpha = alpha def forward(self, x): x = F.leaky_relu(self.conv1(x), self.alpha) x = F.leaky_relu(self.conv2(x), self.alpha) x = x.view(x.size(0), -1) x = F.leaky_relu(self.fc1(x), self.alpha) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 
= 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (256, 16), (16, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (10, 256), (256, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1)) buf1 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool) buf2 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(128)](buf0, primals_2, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 16, 1, 1), (16, 1, 1, 1)) buf4 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) buf5 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) triton_poi_fused_convolution_leaky_relu_1[grid(64)](buf3, primals_5, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (4, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf6) 
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.bool) buf8 = empty_strided_cuda((4, 256), (256, 1), torch.float32) triton_poi_fused_leaky_relu_2[grid(1024)](buf6, primals_7, buf7, buf8, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_7 buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf9) del primals_9 return (buf9, primals_1, primals_3, primals_4, buf1, buf2, buf4, reinterpret_tensor(buf5, (4, 16), (16, 1), 0), buf7, buf8, primals_8, primals_6) class cnn_4layer_LeakyReluNew(nn.Module): def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1): super(cnn_4layer_LeakyReluNew, self).__init__() self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1) self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1) self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size) self.fc2 = nn.Linear(linear_size, 10) self.alpha = alpha def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
mnmueller/auto_LiRPA
cnn_4layer_LeakyRelu
false
7,264
[ "BSD-3-Clause" ]
1
55cb270b0b99f07b74541d55706c69fbb9daff66
https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66
Net2
import torch from torch import nn class Net2(nn.Module): """ Net2 is a more complex network consisting of two hidden layers with 400 and 300 neurons """ hidden1 = 400 hidden2 = 300 def __init__(self, input_size): super(Net2, self).__init__() self.fc1 = nn.Linear(input_size, self.hidden1) self.relu1 = nn.ReLU() self.fc2 = nn.Linear(self.hidden1, self.hidden2) self.relu2 = nn.ReLU() self.fc3 = nn.Linear(self.hidden2, 1) def forward(self, x): out = self.fc1(x) out = self.relu1(out) out = self.fc2(out) out = self.relu2(out) out = self.fc3(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300,), (1,)) assert_size_stride(primals_6, (1, 300), (300, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf8, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2, primals_5, buf3, buf7, 19200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK =128, num_warps=4, num_stages=1) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), 
torch.float32) extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_7 return reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), buf4, primals_6, buf7, primals_4, buf8 class Net2New(nn.Module): """ Net2 is a more complex network consisting of two hidden layers with 400 and 300 neurons """ hidden1 = 400 hidden2 = 300 def __init__(self, input_size): super(Net2New, self).__init__() self.fc1 = nn.Linear(input_size, self.hidden1) self.relu1 = nn.ReLU() self.fc2 = nn.Linear(self.hidden1, self.hidden2) self.relu2 = nn.ReLU() self.fc3 = nn.Linear(self.hidden2, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
moritzschaefer/pavooc
Net2
false
7,265
[ "MIT" ]
1
735f5455f9a95a5734436a24e2aa92cf600c91af
https://github.com/moritzschaefer/pavooc/tree/735f5455f9a95a5734436a24e2aa92cf600c91af
Debugnetwork
from _paritybench_helpers import _mock_config import torch import torch.nn as nn from torch.nn import init class conv(nn.Module): """ n*n conv with relu """ def __init__(self, in_dim, out_dim, kernal_size, stride, padding): super(conv, self).__init__() self.con_layer = nn.Conv2d(in_dim, out_dim, kernal_size, stride, padding) self.relu = nn.ReLU(inplace=True) self.initi() def forward(self, input_): output = self.con_layer(input_) output = self.relu(output) return output def initi(self): init.normal_(self.con_layer.weight, std=0.01) if self.con_layer.bias is not None: init.constant_(self.con_layer.bias, 0.0) class VGG_19(nn.Module): """ VGG_19 first 10 layers 11 and 12 by CMU """ def __init__(self, input_dim): super(VGG_19, self).__init__() self.conv1_1 = conv(input_dim, 64, 3, 1, 1) self.conv1_2 = conv(64, 64, 3, 1, 1) self.pooling_1 = nn.MaxPool2d(2, 2, 0) self.conv2_1 = conv(64, 128, 3, 1, 1) self.conv2_2 = conv(128, 128, 3, 1, 1) self.pooling_2 = nn.MaxPool2d(2, 2, 0) self.conv3_1 = conv(128, 256, 3, 1, 1) self.conv3_2 = conv(256, 256, 3, 1, 1) self.conv3_3 = conv(256, 256, 3, 1, 1) self.conv3_4 = conv(256, 256, 3, 1, 1) self.pooling_3 = nn.MaxPool2d(2, 2, 0) self.conv4_1 = conv(256, 512, 3, 1, 1) self.conv4_2 = conv(512, 512, 3, 1, 1) self.conv4_3 = conv(512, 256, 3, 1, 1) self.conv4_4 = conv(256, 128, 3, 1, 1) def forward(self, input_): output = self.conv1_1(input_) output = self.conv1_2(output) output = self.pooling_1(output) output = self.conv2_1(output) output = self.conv2_2(output) output = self.pooling_2(output) output = self.conv3_1(output) output = self.conv3_2(output) output = self.conv3_3(output) output = self.conv3_4(output) output = self.pooling_3(output) output = self.conv4_1(output) output = self.conv4_2(output) output = self.conv4_3(output) output = self.conv4_4(output) return output class Debugnetwork(nn.Module): """ """ def __init__(self, args): super(Debugnetwork, self).__init__() self.block_0 = VGG_19(3) def forward(self, input_): output = 
self.block_0(input_) return output def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'args': _mock_config()}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, 
tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = 
tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], 
True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) 
assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_25, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf7 = buf6 del buf6 
triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf18 = extern_kernels.convolution(buf17, primals_16, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19, buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_7[grid(65536)](buf27, primals_23, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_23 buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1)) buf29 = buf28 del buf28 buf30 
= empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_8[grid(32768)]( buf29, primals_25, buf30, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_25 return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf30) class conv(nn.Module): """ n*n conv with relu """ def __init__(self, in_dim, out_dim, kernal_size, stride, padding): super(conv, self).__init__() self.con_layer = nn.Conv2d(in_dim, out_dim, kernal_size, stride, padding) self.relu = nn.ReLU(inplace=True) self.initi() def forward(self, input_): output = self.con_layer(input_) output = self.relu(output) return output def initi(self): init.normal_(self.con_layer.weight, std=0.01) if self.con_layer.bias is not None: init.constant_(self.con_layer.bias, 0.0) class VGG_19(nn.Module): """ VGG_19 first 10 layers 11 and 12 by CMU """ def __init__(self, input_dim): super(VGG_19, self).__init__() self.conv1_1 = conv(input_dim, 64, 3, 1, 1) self.conv1_2 = conv(64, 64, 3, 1, 1) self.pooling_1 = nn.MaxPool2d(2, 2, 0) self.conv2_1 = conv(64, 128, 3, 1, 1) self.conv2_2 = conv(128, 128, 3, 1, 1) self.pooling_2 = nn.MaxPool2d(2, 2, 0) self.conv3_1 = conv(128, 256, 3, 1, 1) self.conv3_2 = conv(256, 256, 3, 1, 1) self.conv3_3 = conv(256, 256, 3, 1, 1) self.conv3_4 = conv(256, 256, 3, 1, 1) self.pooling_3 = nn.MaxPool2d(2, 2, 0) self.conv4_1 = conv(256, 512, 3, 1, 1) self.conv4_2 = conv(512, 512, 3, 1, 1) self.conv4_3 = conv(512, 256, 3, 1, 1) self.conv4_4 = conv(256, 128, 3, 1, 1) def forward(self, input_): output = self.conv1_1(input_) output = self.conv1_2(output) output = self.pooling_1(output) output = self.conv2_1(output) output = self.conv2_2(output) output = self.pooling_2(output) output = self.conv3_1(output) output = 
self.conv3_2(output) output = self.conv3_3(output) output = self.conv3_4(output) output = self.pooling_3(output) output = self.conv4_1(output) output = self.conv4_2(output) output = self.conv4_3(output) output = self.conv4_4(output) return output class DebugnetworkNew(nn.Module): """ """ def __init__(self, args): super(DebugnetworkNew, self).__init__() self.block_0 = VGG_19(3) def forward(self, input_0): primals_1 = self.block_0.conv1_1.con_layer.weight primals_2 = self.block_0.conv1_1.con_layer.bias primals_4 = self.block_0.conv1_2.con_layer.weight primals_5 = self.block_0.conv1_2.con_layer.bias primals_6 = self.block_0.conv2_1.con_layer.weight primals_7 = self.block_0.conv2_1.con_layer.bias primals_8 = self.block_0.conv2_2.con_layer.weight primals_9 = self.block_0.conv2_2.con_layer.bias primals_10 = self.block_0.conv3_1.con_layer.weight primals_11 = self.block_0.conv3_1.con_layer.bias primals_12 = self.block_0.conv3_2.con_layer.weight primals_13 = self.block_0.conv3_2.con_layer.bias primals_14 = self.block_0.conv3_3.con_layer.weight primals_15 = self.block_0.conv3_3.con_layer.bias primals_16 = self.block_0.conv3_4.con_layer.weight primals_17 = self.block_0.conv3_4.con_layer.bias primals_18 = self.block_0.conv4_1.con_layer.weight primals_19 = self.block_0.conv4_1.con_layer.bias primals_20 = self.block_0.conv4_2.con_layer.weight primals_21 = self.block_0.conv4_2.con_layer.bias primals_22 = self.block_0.conv4_3.con_layer.weight primals_23 = self.block_0.conv4_3.con_layer.bias primals_24 = self.block_0.conv4_4.con_layer.weight primals_25 = self.block_0.conv4_4.con_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0]
H-Liu1997/Pytorch_Pose_Estimation_Framework
Debugnetwork
false
7,266
[ "MIT" ]
1
06616b3459ff639f8486e6ea4f93922597788b2a
https://github.com/H-Liu1997/Pytorch_Pose_Estimation_Framework/tree/06616b3459ff639f8486e6ea4f93922597788b2a
NeuralNet
import torch import torch.nn as nn import torch.nn.functional as F class NeuralNet(nn.Module): def __init__(self, num_input_nodes, num_hidden_nodes, output_dimension): super(NeuralNet, self).__init__() self.input_linear = nn.Linear(num_input_nodes, num_hidden_nodes) self.output_linear = nn.Linear(num_hidden_nodes, output_dimension) def forward(self, input_vector): out = self.input_linear(input_vector) out = F.tanh(out) out = self.output_linear(out) out = F.softmax(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_input_nodes': 4, 'num_hidden_nodes': 4, 'output_dimension': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), 
(4, 1), 0 ), buf1, buf4, primals_4 class NeuralNetNew(nn.Module): def __init__(self, num_input_nodes, num_hidden_nodes, output_dimension): super(NeuralNetNew, self).__init__() self.input_linear = nn.Linear(num_input_nodes, num_hidden_nodes) self.output_linear = nn.Linear(num_hidden_nodes, output_dimension) def forward(self, input_0): primals_1 = self.input_linear.weight primals_2 = self.input_linear.bias primals_4 = self.output_linear.weight primals_5 = self.output_linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mohiitgupta/named-entity-recognition-nlp-purdue
NeuralNet
false
7,267
[ "MIT" ]
1
68232bbd5d17f3e3989e5df37175cdc670896608
https://github.com/mohiitgupta/named-entity-recognition-nlp-purdue/tree/68232bbd5d17f3e3989e5df37175cdc670896608
LoRALayer
import torch from torch import nn import torch.nn.parallel import torch.utils.data class LoRALayer(nn.Module): def __init__(self, n_in, n_out=None, adapter_dim=16, adapter_alpha=32): super(LoRALayer, self).__init__() if not n_out: n_out = n_in self.adapter_dim = adapter_dim self.adapter_alpha = adapter_alpha self.adapter_proj_1 = nn.Linear(n_in, adapter_dim, bias=False) nn.init.normal_(self.adapter_proj_1.weight, std=0.02) self.adapter_proj_2 = nn.Linear(adapter_dim, n_out, bias=False) self.adapter_proj_2.weight.data.zero_() def forward(self, x): scale_factor = self.adapter_dim / self.adapter_alpha result = torch.matmul(x, self.adapter_proj_1.weight.type_as(x).T) return torch.matmul(result, self.adapter_proj_2.weight.type_as(x).T ) * scale_factor def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](buf2, 256, XBLOCK=128, num_warps= 4, num_stages=1) return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, primals_3 class LoRALayerNew(nn.Module): def __init__(self, n_in, n_out=None, adapter_dim=16, adapter_alpha=32): super(LoRALayerNew, self).__init__() if not n_out: n_out = n_in self.adapter_dim = adapter_dim self.adapter_alpha = adapter_alpha self.adapter_proj_1 = nn.Linear(n_in, adapter_dim, 
bias=False) nn.init.normal_(self.adapter_proj_1.weight, std=0.02) self.adapter_proj_2 = nn.Linear(adapter_dim, n_out, bias=False) self.adapter_proj_2.weight.data.zero_() def forward(self, input_0): primals_1 = self.adapter_proj_1.weight primals_3 = self.adapter_proj_2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mojishoki/LoRA
LoRALayer
false
7,268
[ "MIT" ]
1
556225e776b4e2c5f77d332db15f0c712c13fe0e
https://github.com/mojishoki/LoRA/tree/556225e776b4e2c5f77d332db15f0c712c13fe0e
NetVLAD
import torch import numpy as np from torch import nn import torch.nn.functional as F class NetVLAD(nn.Module): """NetVLAD layer implementation""" def __init__(self, dim, num_clusters=64): """ Args: dim : int Dimension of descriptors num_clusters : int The number of clusters """ super(NetVLAD, self).__init__() self.num_clusters = num_clusters self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=False ) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) def init_params(self, clsts, traindescs): clsts_assign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True) dots = np.dot(clsts_assign, traindescs.T) dots.sort(0) dots = dots[::-1, :] alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item() self.centroids = nn.Parameter(torch.from_numpy(clsts)) self.conv.weight = nn.Parameter(torch.from_numpy(alpha * clsts_assign).unsqueeze(2).unsqueeze(3)) self.conv.bias = None def forward(self, x, crm=None): N, C = x.shape[:2] soft_assign = self.conv(x).view(N, self.num_clusters, -1) soft_assign = F.softmax(soft_assign, dim=1) if crm is not None: assert crm.shape[0] == N and crm.shape[1] == 1 and crm.shape[2: ] == x.shape[2:] soft_assign = torch.mul(soft_assign, crm.view(N, 1, -1)) x_flatten = x.view(N, C, -1) vlad = torch.zeros((N, self.num_clusters, C), dtype=x.dtype, layout =x.layout, device=x.device) for c in range(self.num_clusters): residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3 ) - self.centroids[c:c + 1, :].expand(x_flatten.size(-1), - 1, -1).permute(1, 2, 0).unsqueeze(0) residual *= soft_assign[:, c:c + 1, :].unsqueeze(2) vlad[:, c:c + 1, :] = residual.sum(dim=-1) vlad = F.normalize(vlad, p=2, dim=2) vlad = vlad.view(N, -1) vlad = F.normalize(vlad, p=2, dim=1) return vlad def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) @triton.jit def triton_per_fused_mul_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, 
out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (20 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (24 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (28 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (36 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (40 + x0), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (44 + x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (52 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (56 + x0), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (60 + x0), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (68 + x0), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (72 + x0), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (76 + x0), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (80 + x0), xmask, 
eviction_policy='evict_last') tmp41 = tl.load(in_ptr1 + (84 + x0), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (88 + x0), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr1 + (92 + x0), xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr1 + (96 + x0), xmask, eviction_policy='evict_last') tmp49 = tl.load(in_ptr1 + (100 + x0), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr1 + (104 + x0), xmask, eviction_policy='evict_last') tmp53 = tl.load(in_ptr1 + (108 + x0), xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr1 + (112 + x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp59 = tl.load(in_ptr2 + (r2 + 1024 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp60 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp63 = tl.load(in_ptr4 + (r2 + 16 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp70 = tl.load(in_ptr2 + (16 + r2 + 1024 * x1), xmask, eviction_policy ='evict_last', other=0.0) tmp79 = tl.load(in_ptr2 + (32 + r2 + 1024 * x1), xmask, eviction_policy ='evict_last', other=0.0) tmp88 = tl.load(in_ptr2 + (48 + r2 + 1024 * x1), xmask, eviction_policy ='evict_last', other=0.0) tmp97 = tl.load(in_ptr2 + (64 + r2 + 1024 * x1), xmask, eviction_policy ='evict_last', other=0.0) tmp106 = tl.load(in_ptr2 + (80 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp115 = tl.load(in_ptr2 + (96 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp124 = tl.load(in_ptr2 + (112 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp133 = tl.load(in_ptr2 + (128 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp142 = tl.load(in_ptr2 + (144 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp151 = tl.load(in_ptr2 + (160 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp160 = tl.load(in_ptr2 + (176 + r2 + 1024 * 
x1), xmask, eviction_policy='evict_last', other=0.0) tmp169 = tl.load(in_ptr2 + (192 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp178 = tl.load(in_ptr2 + (208 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp187 = tl.load(in_ptr2 + (224 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp196 = tl.load(in_ptr2 + (240 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp205 = tl.load(in_ptr2 + (256 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp214 = tl.load(in_ptr2 + (272 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp223 = tl.load(in_ptr2 + (288 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp232 = tl.load(in_ptr2 + (304 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp241 = tl.load(in_ptr2 + (320 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp250 = tl.load(in_ptr2 + (336 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp259 = tl.load(in_ptr2 + (352 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp268 = tl.load(in_ptr2 + (368 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp277 = tl.load(in_ptr2 + (384 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp286 = tl.load(in_ptr2 + (400 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp295 = tl.load(in_ptr2 + (416 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp304 = tl.load(in_ptr2 + (432 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp313 = tl.load(in_ptr2 + (448 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp4 = tmp0 - tmp3 tmp6 = tmp0 - tmp5 tmp8 = tmp0 - tmp7 tmp10 = tmp0 - tmp9 tmp12 = tmp0 - tmp11 tmp14 = tmp0 - tmp13 tmp16 = tmp0 - tmp15 tmp18 = tmp0 - tmp17 tmp20 = tmp0 - tmp19 tmp22 = tmp0 - tmp21 tmp24 = tmp0 - tmp23 tmp26 = tmp0 - tmp25 
tmp28 = tmp0 - tmp27 tmp30 = tmp0 - tmp29 tmp32 = tmp0 - tmp31 tmp34 = tmp0 - tmp33 tmp36 = tmp0 - tmp35 tmp38 = tmp0 - tmp37 tmp40 = tmp0 - tmp39 tmp42 = tmp0 - tmp41 tmp44 = tmp0 - tmp43 tmp46 = tmp0 - tmp45 tmp48 = tmp0 - tmp47 tmp50 = tmp0 - tmp49 tmp52 = tmp0 - tmp51 tmp54 = tmp0 - tmp53 tmp56 = tmp0 - tmp55 tmp58 = tmp0 - tmp57 tmp61 = tmp59 - tmp60 tmp62 = tl_math.exp(tmp61) tmp64 = tmp62 / tmp63 tmp65 = tmp58 * tmp64 tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK]) tmp68 = tl.where(xmask, tmp66, 0) tmp69 = tl.sum(tmp68, 1)[:, None] tmp71 = tmp70 - tmp60 tmp72 = tl_math.exp(tmp71) tmp73 = tmp72 / tmp63 tmp74 = tmp2 * tmp73 tmp75 = tl.broadcast_to(tmp74, [XBLOCK, RBLOCK]) tmp77 = tl.where(xmask, tmp75, 0) tmp78 = tl.sum(tmp77, 1)[:, None] tmp80 = tmp79 - tmp60 tmp81 = tl_math.exp(tmp80) tmp82 = tmp81 / tmp63 tmp83 = tmp4 * tmp82 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = tl.where(xmask, tmp84, 0) tmp87 = tl.sum(tmp86, 1)[:, None] tmp89 = tmp88 - tmp60 tmp90 = tl_math.exp(tmp89) tmp91 = tmp90 / tmp63 tmp92 = tmp6 * tmp91 tmp93 = tl.broadcast_to(tmp92, [XBLOCK, RBLOCK]) tmp95 = tl.where(xmask, tmp93, 0) tmp96 = tl.sum(tmp95, 1)[:, None] tmp98 = tmp97 - tmp60 tmp99 = tl_math.exp(tmp98) tmp100 = tmp99 / tmp63 tmp101 = tmp8 * tmp100 tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK]) tmp104 = tl.where(xmask, tmp102, 0) tmp105 = tl.sum(tmp104, 1)[:, None] tmp107 = tmp106 - tmp60 tmp108 = tl_math.exp(tmp107) tmp109 = tmp108 / tmp63 tmp110 = tmp10 * tmp109 tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK]) tmp113 = tl.where(xmask, tmp111, 0) tmp114 = tl.sum(tmp113, 1)[:, None] tmp116 = tmp115 - tmp60 tmp117 = tl_math.exp(tmp116) tmp118 = tmp117 / tmp63 tmp119 = tmp12 * tmp118 tmp120 = tl.broadcast_to(tmp119, [XBLOCK, RBLOCK]) tmp122 = tl.where(xmask, tmp120, 0) tmp123 = tl.sum(tmp122, 1)[:, None] tmp125 = tmp124 - tmp60 tmp126 = tl_math.exp(tmp125) tmp127 = tmp126 / tmp63 tmp128 = tmp14 * tmp127 tmp129 = tl.broadcast_to(tmp128, [XBLOCK, RBLOCK]) tmp131 = 
tl.where(xmask, tmp129, 0) tmp132 = tl.sum(tmp131, 1)[:, None] tmp134 = tmp133 - tmp60 tmp135 = tl_math.exp(tmp134) tmp136 = tmp135 / tmp63 tmp137 = tmp16 * tmp136 tmp138 = tl.broadcast_to(tmp137, [XBLOCK, RBLOCK]) tmp140 = tl.where(xmask, tmp138, 0) tmp141 = tl.sum(tmp140, 1)[:, None] tmp143 = tmp142 - tmp60 tmp144 = tl_math.exp(tmp143) tmp145 = tmp144 / tmp63 tmp146 = tmp18 * tmp145 tmp147 = tl.broadcast_to(tmp146, [XBLOCK, RBLOCK]) tmp149 = tl.where(xmask, tmp147, 0) tmp150 = tl.sum(tmp149, 1)[:, None] tmp152 = tmp151 - tmp60 tmp153 = tl_math.exp(tmp152) tmp154 = tmp153 / tmp63 tmp155 = tmp20 * tmp154 tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK]) tmp158 = tl.where(xmask, tmp156, 0) tmp159 = tl.sum(tmp158, 1)[:, None] tmp161 = tmp160 - tmp60 tmp162 = tl_math.exp(tmp161) tmp163 = tmp162 / tmp63 tmp164 = tmp22 * tmp163 tmp165 = tl.broadcast_to(tmp164, [XBLOCK, RBLOCK]) tmp167 = tl.where(xmask, tmp165, 0) tmp168 = tl.sum(tmp167, 1)[:, None] tmp170 = tmp169 - tmp60 tmp171 = tl_math.exp(tmp170) tmp172 = tmp171 / tmp63 tmp173 = tmp24 * tmp172 tmp174 = tl.broadcast_to(tmp173, [XBLOCK, RBLOCK]) tmp176 = tl.where(xmask, tmp174, 0) tmp177 = tl.sum(tmp176, 1)[:, None] tmp179 = tmp178 - tmp60 tmp180 = tl_math.exp(tmp179) tmp181 = tmp180 / tmp63 tmp182 = tmp26 * tmp181 tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK]) tmp185 = tl.where(xmask, tmp183, 0) tmp186 = tl.sum(tmp185, 1)[:, None] tmp188 = tmp187 - tmp60 tmp189 = tl_math.exp(tmp188) tmp190 = tmp189 / tmp63 tmp191 = tmp28 * tmp190 tmp192 = tl.broadcast_to(tmp191, [XBLOCK, RBLOCK]) tmp194 = tl.where(xmask, tmp192, 0) tmp195 = tl.sum(tmp194, 1)[:, None] tmp197 = tmp196 - tmp60 tmp198 = tl_math.exp(tmp197) tmp199 = tmp198 / tmp63 tmp200 = tmp30 * tmp199 tmp201 = tl.broadcast_to(tmp200, [XBLOCK, RBLOCK]) tmp203 = tl.where(xmask, tmp201, 0) tmp204 = tl.sum(tmp203, 1)[:, None] tmp206 = tmp205 - tmp60 tmp207 = tl_math.exp(tmp206) tmp208 = tmp207 / tmp63 tmp209 = tmp32 * tmp208 tmp210 = tl.broadcast_to(tmp209, [XBLOCK, 
RBLOCK]) tmp212 = tl.where(xmask, tmp210, 0) tmp213 = tl.sum(tmp212, 1)[:, None] tmp215 = tmp214 - tmp60 tmp216 = tl_math.exp(tmp215) tmp217 = tmp216 / tmp63 tmp218 = tmp34 * tmp217 tmp219 = tl.broadcast_to(tmp218, [XBLOCK, RBLOCK]) tmp221 = tl.where(xmask, tmp219, 0) tmp222 = tl.sum(tmp221, 1)[:, None] tmp224 = tmp223 - tmp60 tmp225 = tl_math.exp(tmp224) tmp226 = tmp225 / tmp63 tmp227 = tmp36 * tmp226 tmp228 = tl.broadcast_to(tmp227, [XBLOCK, RBLOCK]) tmp230 = tl.where(xmask, tmp228, 0) tmp231 = tl.sum(tmp230, 1)[:, None] tmp233 = tmp232 - tmp60 tmp234 = tl_math.exp(tmp233) tmp235 = tmp234 / tmp63 tmp236 = tmp38 * tmp235 tmp237 = tl.broadcast_to(tmp236, [XBLOCK, RBLOCK]) tmp239 = tl.where(xmask, tmp237, 0) tmp240 = tl.sum(tmp239, 1)[:, None] tmp242 = tmp241 - tmp60 tmp243 = tl_math.exp(tmp242) tmp244 = tmp243 / tmp63 tmp245 = tmp40 * tmp244 tmp246 = tl.broadcast_to(tmp245, [XBLOCK, RBLOCK]) tmp248 = tl.where(xmask, tmp246, 0) tmp249 = tl.sum(tmp248, 1)[:, None] tmp251 = tmp250 - tmp60 tmp252 = tl_math.exp(tmp251) tmp253 = tmp252 / tmp63 tmp254 = tmp42 * tmp253 tmp255 = tl.broadcast_to(tmp254, [XBLOCK, RBLOCK]) tmp257 = tl.where(xmask, tmp255, 0) tmp258 = tl.sum(tmp257, 1)[:, None] tmp260 = tmp259 - tmp60 tmp261 = tl_math.exp(tmp260) tmp262 = tmp261 / tmp63 tmp263 = tmp44 * tmp262 tmp264 = tl.broadcast_to(tmp263, [XBLOCK, RBLOCK]) tmp266 = tl.where(xmask, tmp264, 0) tmp267 = tl.sum(tmp266, 1)[:, None] tmp269 = tmp268 - tmp60 tmp270 = tl_math.exp(tmp269) tmp271 = tmp270 / tmp63 tmp272 = tmp46 * tmp271 tmp273 = tl.broadcast_to(tmp272, [XBLOCK, RBLOCK]) tmp275 = tl.where(xmask, tmp273, 0) tmp276 = tl.sum(tmp275, 1)[:, None] tmp278 = tmp277 - tmp60 tmp279 = tl_math.exp(tmp278) tmp280 = tmp279 / tmp63 tmp281 = tmp48 * tmp280 tmp282 = tl.broadcast_to(tmp281, [XBLOCK, RBLOCK]) tmp284 = tl.where(xmask, tmp282, 0) tmp285 = tl.sum(tmp284, 1)[:, None] tmp287 = tmp286 - tmp60 tmp288 = tl_math.exp(tmp287) tmp289 = tmp288 / tmp63 tmp290 = tmp50 * tmp289 tmp291 = 
tl.broadcast_to(tmp290, [XBLOCK, RBLOCK]) tmp293 = tl.where(xmask, tmp291, 0) tmp294 = tl.sum(tmp293, 1)[:, None] tmp296 = tmp295 - tmp60 tmp297 = tl_math.exp(tmp296) tmp298 = tmp297 / tmp63 tmp299 = tmp52 * tmp298 tmp300 = tl.broadcast_to(tmp299, [XBLOCK, RBLOCK]) tmp302 = tl.where(xmask, tmp300, 0) tmp303 = tl.sum(tmp302, 1)[:, None] tmp305 = tmp304 - tmp60 tmp306 = tl_math.exp(tmp305) tmp307 = tmp306 / tmp63 tmp308 = tmp54 * tmp307 tmp309 = tl.broadcast_to(tmp308, [XBLOCK, RBLOCK]) tmp311 = tl.where(xmask, tmp309, 0) tmp312 = tl.sum(tmp311, 1)[:, None] tmp314 = tmp313 - tmp60 tmp315 = tl_math.exp(tmp314) tmp316 = tmp315 / tmp63 tmp317 = tmp56 * tmp316 tmp318 = tl.broadcast_to(tmp317, [XBLOCK, RBLOCK]) tmp320 = tl.where(xmask, tmp318, 0) tmp321 = tl.sum(tmp320, 1)[:, None] tl.store(out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr1 + (r2 + 16 * x3), tmp4, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp6, xmask) tl.store(out_ptr3 + (r2 + 16 * x3), tmp8, xmask) tl.store(out_ptr4 + (r2 + 16 * x3), tmp10, xmask) tl.store(out_ptr5 + (r2 + 16 * x3), tmp12, xmask) tl.store(out_ptr6 + (r2 + 16 * x3), tmp14, xmask) tl.store(out_ptr7 + (r2 + 16 * x3), tmp16, xmask) tl.store(out_ptr8 + (r2 + 16 * x3), tmp18, xmask) tl.store(out_ptr9 + (r2 + 16 * x3), tmp20, xmask) tl.store(out_ptr10 + (r2 + 16 * x3), tmp22, xmask) tl.store(out_ptr11 + (r2 + 16 * x3), tmp24, xmask) tl.store(out_ptr12 + (r2 + 16 * x3), tmp26, xmask) tl.store(out_ptr13 + (r2 + 16 * x3), tmp28, xmask) tl.store(out_ptr14 + (r2 + 16 * x3), tmp30, xmask) tl.store(out_ptr15 + (r2 + 16 * x3), tmp32, xmask) tl.store(out_ptr16 + (r2 + 16 * x3), tmp34, xmask) tl.store(out_ptr17 + (r2 + 16 * x3), tmp36, xmask) tl.store(out_ptr18 + (r2 + 16 * x3), tmp38, xmask) tl.store(out_ptr19 + (r2 + 16 * x3), tmp40, xmask) tl.store(out_ptr20 + (r2 + 16 * x3), tmp42, xmask) tl.store(out_ptr21 + (r2 + 16 * x3), tmp44, xmask) tl.store(out_ptr22 + (r2 + 16 * x3), tmp46, xmask) tl.store(out_ptr23 + (r2 + 16 * x3), tmp48, xmask) 
tl.store(out_ptr24 + (r2 + 16 * x3), tmp50, xmask) tl.store(out_ptr25 + (r2 + 16 * x3), tmp52, xmask) tl.store(out_ptr26 + (r2 + 16 * x3), tmp54, xmask) tl.store(out_ptr27 + (r2 + 16 * x3), tmp56, xmask) tl.store(out_ptr28 + x3, tmp69, xmask) tl.store(out_ptr29 + x3, tmp78, xmask) tl.store(out_ptr30 + x3, tmp87, xmask) tl.store(out_ptr31 + x3, tmp96, xmask) tl.store(out_ptr32 + x3, tmp105, xmask) tl.store(out_ptr33 + x3, tmp114, xmask) tl.store(out_ptr34 + x3, tmp123, xmask) tl.store(out_ptr35 + x3, tmp132, xmask) tl.store(out_ptr36 + x3, tmp141, xmask) tl.store(out_ptr37 + x3, tmp150, xmask) tl.store(out_ptr38 + x3, tmp159, xmask) tl.store(out_ptr39 + x3, tmp168, xmask) tl.store(out_ptr40 + x3, tmp177, xmask) tl.store(out_ptr41 + x3, tmp186, xmask) tl.store(out_ptr42 + x3, tmp195, xmask) tl.store(out_ptr43 + x3, tmp204, xmask) tl.store(out_ptr44 + x3, tmp213, xmask) tl.store(out_ptr45 + x3, tmp222, xmask) tl.store(out_ptr46 + x3, tmp231, xmask) tl.store(out_ptr47 + x3, tmp240, xmask) tl.store(out_ptr48 + x3, tmp249, xmask) tl.store(out_ptr49 + x3, tmp258, xmask) tl.store(out_ptr50 + x3, tmp267, xmask) tl.store(out_ptr51 + x3, tmp276, xmask) tl.store(out_ptr52 + x3, tmp285, xmask) tl.store(out_ptr53 + x3, tmp294, xmask) tl.store(out_ptr54 + x3, tmp303, xmask) tl.store(out_ptr55 + x3, tmp312, xmask) tl.store(out_ptr56 + x3, tmp321, xmask) @triton.jit def triton_per_fused_mul_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, 
out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (116 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (120 + x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (124 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (132 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (136 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (140 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (144 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (148 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (152 + x0), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (156 + x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (160 + x0), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (164 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (168 + x0), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (172 + x0), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (176 + x0), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (180 + x0), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (184 + x0), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (188 + x0), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr1 + (196 + x0), xmask, 
eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (200 + x0), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr1 + (204 + x0), xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr1 + (208 + x0), xmask, eviction_policy='evict_last') tmp49 = tl.load(in_ptr1 + (212 + x0), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr1 + (216 + x0), xmask, eviction_policy='evict_last') tmp53 = tl.load(in_ptr1 + (220 + x0), xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr1 + (224 + x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr2 + (464 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp61 = tl.load(in_ptr4 + (r2 + 16 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp68 = tl.load(in_ptr2 + (480 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp77 = tl.load(in_ptr2 + (496 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp86 = tl.load(in_ptr2 + (512 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp95 = tl.load(in_ptr2 + (528 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp104 = tl.load(in_ptr2 + (544 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.load(in_ptr2 + (560 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.load(in_ptr2 + (576 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp131 = tl.load(in_ptr2 + (592 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp140 = tl.load(in_ptr2 + (608 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp149 = tl.load(in_ptr2 + (624 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp158 = tl.load(in_ptr2 + (640 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp167 = tl.load(in_ptr2 + (656 + r2 + 1024 * x1), xmask, 
eviction_policy='evict_last', other=0.0) tmp176 = tl.load(in_ptr2 + (672 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.load(in_ptr2 + (688 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.load(in_ptr2 + (704 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp203 = tl.load(in_ptr2 + (720 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp212 = tl.load(in_ptr2 + (736 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp221 = tl.load(in_ptr2 + (752 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp230 = tl.load(in_ptr2 + (768 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp239 = tl.load(in_ptr2 + (784 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp248 = tl.load(in_ptr2 + (800 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.load(in_ptr2 + (816 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.load(in_ptr2 + (832 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp275 = tl.load(in_ptr2 + (848 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp284 = tl.load(in_ptr2 + (864 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp293 = tl.load(in_ptr2 + (880 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp302 = tl.load(in_ptr2 + (896 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp4 = tmp0 - tmp3 tmp6 = tmp0 - tmp5 tmp8 = tmp0 - tmp7 tmp10 = tmp0 - tmp9 tmp12 = tmp0 - tmp11 tmp14 = tmp0 - tmp13 tmp16 = tmp0 - tmp15 tmp18 = tmp0 - tmp17 tmp20 = tmp0 - tmp19 tmp22 = tmp0 - tmp21 tmp24 = tmp0 - tmp23 tmp26 = tmp0 - tmp25 tmp28 = tmp0 - tmp27 tmp30 = tmp0 - tmp29 tmp32 = tmp0 - tmp31 tmp34 = tmp0 - tmp33 tmp36 = tmp0 - tmp35 tmp38 = tmp0 - tmp37 tmp40 = tmp0 - tmp39 tmp42 = tmp0 - tmp41 tmp44 = tmp0 - tmp43 tmp46 = tmp0 - tmp45 
tmp48 = tmp0 - tmp47 tmp50 = tmp0 - tmp49 tmp52 = tmp0 - tmp51 tmp54 = tmp0 - tmp53 tmp56 = tmp0 - tmp55 tmp59 = tmp57 - tmp58 tmp60 = tl_math.exp(tmp59) tmp62 = tmp60 / tmp61 tmp63 = tmp2 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 = tl.sum(tmp66, 1)[:, None] tmp69 = tmp68 - tmp58 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp61 tmp72 = tmp4 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = tl.where(xmask, tmp73, 0) tmp76 = tl.sum(tmp75, 1)[:, None] tmp78 = tmp77 - tmp58 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp61 tmp81 = tmp6 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = tl.where(xmask, tmp82, 0) tmp85 = tl.sum(tmp84, 1)[:, None] tmp87 = tmp86 - tmp58 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp61 tmp90 = tmp8 * tmp89 tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = tl.where(xmask, tmp91, 0) tmp94 = tl.sum(tmp93, 1)[:, None] tmp96 = tmp95 - tmp58 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp61 tmp99 = tmp10 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = tl.where(xmask, tmp100, 0) tmp103 = tl.sum(tmp102, 1)[:, None] tmp105 = tmp104 - tmp58 tmp106 = tl_math.exp(tmp105) tmp107 = tmp106 / tmp61 tmp108 = tmp12 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = tl.where(xmask, tmp109, 0) tmp112 = tl.sum(tmp111, 1)[:, None] tmp114 = tmp113 - tmp58 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp61 tmp117 = tmp14 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = tl.where(xmask, tmp118, 0) tmp121 = tl.sum(tmp120, 1)[:, None] tmp123 = tmp122 - tmp58 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp61 tmp126 = tmp16 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = tl.where(xmask, tmp127, 0) tmp130 = tl.sum(tmp129, 1)[:, None] tmp132 = tmp131 - tmp58 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp61 tmp135 = tmp18 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = 
tl.where(xmask, tmp136, 0) tmp139 = tl.sum(tmp138, 1)[:, None] tmp141 = tmp140 - tmp58 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp61 tmp144 = tmp20 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = tl.where(xmask, tmp145, 0) tmp148 = tl.sum(tmp147, 1)[:, None] tmp150 = tmp149 - tmp58 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp61 tmp153 = tmp22 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = tl.where(xmask, tmp154, 0) tmp157 = tl.sum(tmp156, 1)[:, None] tmp159 = tmp158 - tmp58 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp61 tmp162 = tmp24 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = tl.where(xmask, tmp163, 0) tmp166 = tl.sum(tmp165, 1)[:, None] tmp168 = tmp167 - tmp58 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp61 tmp171 = tmp26 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = tl.where(xmask, tmp172, 0) tmp175 = tl.sum(tmp174, 1)[:, None] tmp177 = tmp176 - tmp58 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp61 tmp180 = tmp28 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = tl.where(xmask, tmp181, 0) tmp184 = tl.sum(tmp183, 1)[:, None] tmp186 = tmp185 - tmp58 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp61 tmp189 = tmp30 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = tl.where(xmask, tmp190, 0) tmp193 = tl.sum(tmp192, 1)[:, None] tmp195 = tmp194 - tmp58 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp61 tmp198 = tmp32 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = tl.where(xmask, tmp199, 0) tmp202 = tl.sum(tmp201, 1)[:, None] tmp204 = tmp203 - tmp58 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp61 tmp207 = tmp34 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = tl.where(xmask, tmp208, 0) tmp211 = tl.sum(tmp210, 1)[:, None] tmp213 = tmp212 - tmp58 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp61 tmp216 = tmp36 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, 
RBLOCK]) tmp219 = tl.where(xmask, tmp217, 0) tmp220 = tl.sum(tmp219, 1)[:, None] tmp222 = tmp221 - tmp58 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp61 tmp225 = tmp38 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = tl.where(xmask, tmp226, 0) tmp229 = tl.sum(tmp228, 1)[:, None] tmp231 = tmp230 - tmp58 tmp232 = tl_math.exp(tmp231) tmp233 = tmp232 / tmp61 tmp234 = tmp40 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = tl.where(xmask, tmp235, 0) tmp238 = tl.sum(tmp237, 1)[:, None] tmp240 = tmp239 - tmp58 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp61 tmp243 = tmp42 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = tl.where(xmask, tmp244, 0) tmp247 = tl.sum(tmp246, 1)[:, None] tmp249 = tmp248 - tmp58 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp61 tmp252 = tmp44 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = tl.where(xmask, tmp253, 0) tmp256 = tl.sum(tmp255, 1)[:, None] tmp258 = tmp257 - tmp58 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp61 tmp261 = tmp46 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = tl.where(xmask, tmp262, 0) tmp265 = tl.sum(tmp264, 1)[:, None] tmp267 = tmp266 - tmp58 tmp268 = tl_math.exp(tmp267) tmp269 = tmp268 / tmp61 tmp270 = tmp48 * tmp269 tmp271 = tl.broadcast_to(tmp270, [XBLOCK, RBLOCK]) tmp273 = tl.where(xmask, tmp271, 0) tmp274 = tl.sum(tmp273, 1)[:, None] tmp276 = tmp275 - tmp58 tmp277 = tl_math.exp(tmp276) tmp278 = tmp277 / tmp61 tmp279 = tmp50 * tmp278 tmp280 = tl.broadcast_to(tmp279, [XBLOCK, RBLOCK]) tmp282 = tl.where(xmask, tmp280, 0) tmp283 = tl.sum(tmp282, 1)[:, None] tmp285 = tmp284 - tmp58 tmp286 = tl_math.exp(tmp285) tmp287 = tmp286 / tmp61 tmp288 = tmp52 * tmp287 tmp289 = tl.broadcast_to(tmp288, [XBLOCK, RBLOCK]) tmp291 = tl.where(xmask, tmp289, 0) tmp292 = tl.sum(tmp291, 1)[:, None] tmp294 = tmp293 - tmp58 tmp295 = tl_math.exp(tmp294) tmp296 = tmp295 / tmp61 tmp297 = tmp54 * tmp296 tmp298 = 
tl.broadcast_to(tmp297, [XBLOCK, RBLOCK]) tmp300 = tl.where(xmask, tmp298, 0) tmp301 = tl.sum(tmp300, 1)[:, None] tmp303 = tmp302 - tmp58 tmp304 = tl_math.exp(tmp303) tmp305 = tmp304 / tmp61 tmp306 = tmp56 * tmp305 tmp307 = tl.broadcast_to(tmp306, [XBLOCK, RBLOCK]) tmp309 = tl.where(xmask, tmp307, 0) tmp310 = tl.sum(tmp309, 1)[:, None] tl.store(out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr1 + (r2 + 16 * x3), tmp4, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp6, xmask) tl.store(out_ptr3 + (r2 + 16 * x3), tmp8, xmask) tl.store(out_ptr4 + (r2 + 16 * x3), tmp10, xmask) tl.store(out_ptr5 + (r2 + 16 * x3), tmp12, xmask) tl.store(out_ptr6 + (r2 + 16 * x3), tmp14, xmask) tl.store(out_ptr7 + (r2 + 16 * x3), tmp16, xmask) tl.store(out_ptr8 + (r2 + 16 * x3), tmp18, xmask) tl.store(out_ptr9 + (r2 + 16 * x3), tmp20, xmask) tl.store(out_ptr10 + (r2 + 16 * x3), tmp22, xmask) tl.store(out_ptr11 + (r2 + 16 * x3), tmp24, xmask) tl.store(out_ptr12 + (r2 + 16 * x3), tmp26, xmask) tl.store(out_ptr13 + (r2 + 16 * x3), tmp28, xmask) tl.store(out_ptr14 + (r2 + 16 * x3), tmp30, xmask) tl.store(out_ptr15 + (r2 + 16 * x3), tmp32, xmask) tl.store(out_ptr16 + (r2 + 16 * x3), tmp34, xmask) tl.store(out_ptr17 + (r2 + 16 * x3), tmp36, xmask) tl.store(out_ptr18 + (r2 + 16 * x3), tmp38, xmask) tl.store(out_ptr19 + (r2 + 16 * x3), tmp40, xmask) tl.store(out_ptr20 + (r2 + 16 * x3), tmp42, xmask) tl.store(out_ptr21 + (r2 + 16 * x3), tmp44, xmask) tl.store(out_ptr22 + (r2 + 16 * x3), tmp46, xmask) tl.store(out_ptr23 + (r2 + 16 * x3), tmp48, xmask) tl.store(out_ptr24 + (r2 + 16 * x3), tmp50, xmask) tl.store(out_ptr25 + (r2 + 16 * x3), tmp52, xmask) tl.store(out_ptr26 + (r2 + 16 * x3), tmp54, xmask) tl.store(out_ptr27 + (r2 + 16 * x3), tmp56, xmask) tl.store(out_ptr28 + x3, tmp67, xmask) tl.store(out_ptr29 + x3, tmp76, xmask) tl.store(out_ptr30 + x3, tmp85, xmask) tl.store(out_ptr31 + x3, tmp94, xmask) tl.store(out_ptr32 + x3, tmp103, xmask) tl.store(out_ptr33 + x3, tmp112, xmask) 
# (tail of the preceding fused softmax/mul/sub/sum kernel, whose `def` line is
# above this chunk: flush its remaining per-x3 weighted-residual reductions)
    tl.store(out_ptr34 + x3, tmp121, xmask)
    tl.store(out_ptr35 + x3, tmp130, xmask)
    tl.store(out_ptr36 + x3, tmp139, xmask)
    tl.store(out_ptr37 + x3, tmp148, xmask)
    tl.store(out_ptr38 + x3, tmp157, xmask)
    tl.store(out_ptr39 + x3, tmp166, xmask)
    tl.store(out_ptr40 + x3, tmp175, xmask)
    tl.store(out_ptr41 + x3, tmp184, xmask)
    tl.store(out_ptr42 + x3, tmp193, xmask)
    tl.store(out_ptr43 + x3, tmp202, xmask)
    tl.store(out_ptr44 + x3, tmp211, xmask)
    tl.store(out_ptr45 + x3, tmp220, xmask)
    tl.store(out_ptr46 + x3, tmp229, xmask)
    tl.store(out_ptr47 + x3, tmp238, xmask)
    tl.store(out_ptr48 + x3, tmp247, xmask)
    tl.store(out_ptr49 + x3, tmp256, xmask)
    tl.store(out_ptr50 + x3, tmp265, xmask)
    tl.store(out_ptr51 + x3, tmp274, xmask)
    tl.store(out_ptr52 + x3, tmp283, xmask)
    tl.store(out_ptr53 + x3, tmp292, xmask)
    tl.store(out_ptr54 + x3, tmp301, xmask)
    tl.store(out_ptr55 + x3, tmp310, xmask)


@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
    out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11,
    out_ptr12, out_ptr13, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused (feature - center) * softmax_weight reduction for seven source
    # columns of in_ptr1 (offsets 228..252, step 4) paired with seven rows of
    # in_ptr2 (offsets 912..1008, step 16, i.e. rows 57..63 of a 64-row
    # layout).
    # NOTE(review): the pattern (conv logits -> softmax -> weighted residual
    # sums -> L2 norm downstream) looks like NetVLAD-style aggregation --
    # confirm against the nn.Module this file was generated from.
    #   in_ptr0            : features, read at (r2 + 16 * x3)
    #   in_ptr1            : centers, only columns 228+x0 .. 252+x0 read
    #   in_ptr2            : assignment logits (rows 912.. per x1)
    #   in_ptr3 / in_ptr4  : per-position softmax max / denominator
    #   out_ptr0..out_ptr6 : raw residuals, one buffer per column
    #   out_ptr7..out_ptr13: sum over r2 of residual * exp(logit - max)/denom
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    # the seven center columns this kernel handles
    tmp1 = tl.load(in_ptr1 + (228 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (232 + x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (236 + x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (240 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (244 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (248 + x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (252 + x0), xmask, eviction_policy='evict_last')
    # logits for the matching seven rows, plus the shared softmax max/denom
    tmp15 = tl.load(in_ptr2 + (912 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp19 = tl.load(in_ptr4 + (r2 + 16 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tl.load(in_ptr2 + (928 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp35 = tl.load(in_ptr2 + (944 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp44 = tl.load(in_ptr2 + (960 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp53 = tl.load(in_ptr2 + (976 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp62 = tl.load(in_ptr2 + (992 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp71 = tl.load(in_ptr2 + (1008 + r2 + 1024 * x1), xmask, eviction_policy='evict_last', other=0.0)
    # residuals: feature minus each of the seven centers
    tmp2 = tmp0 - tmp1
    tmp4 = tmp0 - tmp3
    tmp6 = tmp0 - tmp5
    tmp8 = tmp0 - tmp7
    tmp10 = tmp0 - tmp9
    tmp12 = tmp0 - tmp11
    tmp14 = tmp0 - tmp13
    # per column: weight = exp(logit - max) / denom, then reduce
    # sum(residual * weight) over the 16 r2 positions
    tmp17 = tmp15 - tmp16
    tmp18 = tl_math.exp(tmp17)
    tmp20 = tmp18 / tmp19
    tmp21 = tmp2 * tmp20
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
    tmp24 = tl.where(xmask, tmp22, 0)
    tmp25 = tl.sum(tmp24, 1)[:, None]
    tmp27 = tmp26 - tmp16
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp28 / tmp19
    tmp30 = tmp4 * tmp29
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp33 = tl.where(xmask, tmp31, 0)
    tmp34 = tl.sum(tmp33, 1)[:, None]
    tmp36 = tmp35 - tmp16
    tmp37 = tl_math.exp(tmp36)
    tmp38 = tmp37 / tmp19
    tmp39 = tmp6 * tmp38
    tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
    tmp42 = tl.where(xmask, tmp40, 0)
    tmp43 = tl.sum(tmp42, 1)[:, None]
    tmp45 = tmp44 - tmp16
    tmp46 = tl_math.exp(tmp45)
    tmp47 = tmp46 / tmp19
    tmp48 = tmp8 * tmp47
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp51 = tl.where(xmask, tmp49, 0)
    tmp52 = tl.sum(tmp51, 1)[:, None]
    tmp54 = tmp53 - tmp16
    tmp55 = tl_math.exp(tmp54)
    tmp56 = tmp55 / tmp19
    tmp57 = tmp10 * tmp56
    tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK])
    tmp60 = tl.where(xmask, tmp58, 0)
    tmp61 = tl.sum(tmp60, 1)[:, None]
    tmp63 = tmp62 - tmp16
    tmp64 = tl_math.exp(tmp63)
    tmp65 = tmp64 / tmp19
    tmp66 = tmp12 * tmp65
    tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
    tmp69 = tl.where(xmask, tmp67, 0)
    tmp70 = tl.sum(tmp69, 1)[:, None]
    tmp72 = tmp71 - tmp16
    tmp73 = tl_math.exp(tmp72)
    tmp74 = tmp73 / tmp19
    tmp75 = tmp14 * tmp74
    tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
    tmp78 = tl.where(xmask, tmp76, 0)
    tmp79 = tl.sum(tmp78, 1)[:, None]
    # raw residuals out ...
    tl.store(out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.store(out_ptr1 + (r2 + 16 * x3), tmp4, xmask)
    tl.store(out_ptr2 + (r2 + 16 * x3), tmp6, xmask)
    tl.store(out_ptr3 + (r2 + 16 * x3), tmp8, xmask)
    tl.store(out_ptr4 + (r2 + 16 * x3), tmp10, xmask)
    tl.store(out_ptr5 + (r2 + 16 * x3), tmp12, xmask)
    tl.store(out_ptr6 + (r2 + 16 * x3), tmp14, xmask)
    # ... and the seven weighted-sum reductions
    tl.store(out_ptr7 + x3, tmp25, xmask)
    tl.store(out_ptr8 + x3, tmp34, xmask)
    tl.store(out_ptr9 + x3, tmp43, xmask)
    tl.store(out_ptr10 + x3, tmp52, xmask)
    tl.store(out_ptr11 + x3, tmp61, xmask)
    tl.store(out_ptr12 + x3, tmp70, xmask)
    tl.store(out_ptr13 + x3, tmp79, xmask)


@triton.jit
def triton_poi_fused_copy_zeros_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
    in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16,
    in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23,
    in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30,
    in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37,
    in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44,
    in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51,
    in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58,
    in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, XBLOCK: tl.constexpr):
    # Gather 64 small per-row buffers into one 1024-element output: for each
    # row index x1 in 0..63 exactly one in_ptrK is selected (x1 == 0 picks
    # in_ptr4, 1 -> in_ptr3, ..., 63 -> in_ptr61; note the reversed pointer
    # order inside each group of range tests below).  The 0.0 default is
    # only the base of the nested tl.where chain.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 64
    x0 = xindex % 4
    x2 = xindex // 256
    x3 = xindex
    tmp0 = x1
    # x1 == 4 -> in_ptr0, 3 -> in_ptr1, 2 -> in_ptr2, 1 -> in_ptr3, 0 -> in_ptr4
    tmp1 = tl.full([1], 4, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tl.full([1], 3, tl.int64)
    tmp8 = tmp0 >= tmp7
    tmp9 = tmp0 < tmp1
    tmp10 = tmp8 & tmp9
    tmp11 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tl.full([1], 2, tl.int64)
    tmp13 = tmp0 >= tmp12
    tmp14 = tmp0 < tmp7
    tmp15 = tmp13 & tmp14
    tmp16 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
    tmp17 = tl.full([1], 1, tl.int64)
    tmp18 = tmp0 >= tmp17
    tmp19 = tmp0 < tmp12
    tmp20 = tmp18 & tmp19
    tmp21 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
    tmp22 = tmp0 < tmp17
    tmp23 = tl.load(in_ptr4 + (x0 + 4 * x2), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
    tmp24 = 0.0
    tmp25 = tl.where(tmp22, tmp23, tmp24)
    tmp26 = tl.where(tmp20, tmp21, tmp25)
    tmp27 = tl.where(tmp15, tmp16, tmp26)
    tmp28 = tl.where(tmp10, tmp11, tmp27)
    tmp29 = tl.where(tmp5, tmp6, tmp28)
    # x1 == 8 -> in_ptr5, 7 -> in_ptr6, 6 -> in_ptr7, 5 -> in_ptr8
    tmp30 = tl.full([1], 8, tl.int64)
    tmp31 = tmp0 >= tmp30
    tmp32 = tl.full([1], 9, tl.int64)
    tmp33 = tmp0 < tmp32
    tmp34 = tmp31 & tmp33
    tmp35 = tl.load(in_ptr5 + (x0 + 4 * x2), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
    tmp36 = tl.full([1], 7, tl.int64)
    tmp37 = tmp0 >= tmp36
    tmp38 = tmp0 < tmp30
    tmp39 = tmp37 & tmp38
    tmp40 = tl.load(in_ptr6 + (x0 + 4 * x2), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
    tmp41 = tl.full([1], 6, tl.int64)
    tmp42 = tmp0 >= tmp41
    tmp43 = tmp0 < tmp36
    tmp44 = tmp42 & tmp43
    tmp45 = tl.load(in_ptr7 + (x0 + 4 * x2), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
    tmp46 = tmp0 >= tmp3
    tmp47 = tmp0 < tmp41
    tmp48 = tmp46 & tmp47
    tmp49 = tl.load(in_ptr8 + (x0 + 4 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.where(tmp48, tmp49, tmp29)
    tmp51 = tl.where(tmp44, tmp45, tmp50)
    tmp52 = tl.where(tmp39, tmp40, tmp51)
    tmp53 = tl.where(tmp34, tmp35, tmp52)
    # x1 == 12 -> in_ptr9, 11 -> in_ptr10, 10 -> in_ptr11, 9 -> in_ptr12
    tmp54 = tl.full([1], 12, tl.int64)
    tmp55 = tmp0 >= tmp54
    tmp56 = tl.full([1], 13, tl.int64)
    tmp57 = tmp0 < tmp56
    tmp58 = tmp55 & tmp57
    tmp59 = tl.load(in_ptr9 + (x0 + 4 * x2), tmp58 & xmask, eviction_policy='evict_last', other=0.0)
    tmp60 = tl.full([1], 11, tl.int64)
    tmp61 = tmp0 >= tmp60
    tmp62 = tmp0 < tmp54
    tmp63 = tmp61 & tmp62
    tmp64 = tl.load(in_ptr10 + (x0 + 4 * x2), tmp63 & xmask, eviction_policy='evict_last', other=0.0)
    tmp65 = tl.full([1], 10, tl.int64)
    tmp66 = tmp0 >= tmp65
    tmp67 = tmp0 < tmp60
    tmp68 = tmp66 & tmp67
    tmp69 = tl.load(in_ptr11 + (x0 + 4 * x2), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
    tmp70 = tmp0 >= tmp32
    tmp71 = tmp0 < tmp65
    tmp72 = tmp70 & tmp71
    tmp73 = tl.load(in_ptr12 + (x0 + 4 * x2), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
    tmp74 = tl.where(tmp72, tmp73, tmp53)
    tmp75 = tl.where(tmp68, tmp69, tmp74)
    tmp76 = tl.where(tmp63, tmp64, tmp75)
    tmp77 = tl.where(tmp58, tmp59, tmp76)
    # x1 == 16 -> in_ptr13, 15 -> in_ptr14, 14 -> in_ptr15, 13 -> in_ptr16
    tmp78 = tl.full([1], 16, tl.int64)
    tmp79 = tmp0 >= tmp78
    tmp80 = tl.full([1], 17, tl.int64)
    tmp81 = tmp0 < tmp80
    tmp82 = tmp79 & tmp81
    tmp83 = tl.load(in_ptr13 + (x0 + 4 * x2), tmp82 & xmask, eviction_policy='evict_last', other=0.0)
    tmp84 = tl.full([1], 15, tl.int64)
    tmp85 = tmp0 >= tmp84
    tmp86 = tmp0 < tmp78
    tmp87 = tmp85 & tmp86
    tmp88 = tl.load(in_ptr14 + (x0 + 4 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full([1], 14, tl.int64)
    tmp90 = tmp0 >= tmp89
    tmp91 = tmp0 < tmp84
    tmp92 = tmp90 & tmp91
    tmp93 = tl.load(in_ptr15 + (x0 + 4 * x2), tmp92 & xmask, eviction_policy='evict_last', other=0.0)
    tmp94 = tmp0 >= tmp56
    tmp95 = tmp0 < tmp89
    tmp96 = tmp94 & tmp95
    tmp97 = tl.load(in_ptr16 + (x0 + 4 * x2), tmp96 & xmask, eviction_policy='evict_last', other=0.0)
    tmp98 = tl.where(tmp96, tmp97, tmp77)
    tmp99 = tl.where(tmp92, tmp93, tmp98)
    tmp100 = tl.where(tmp87, tmp88, tmp99)
    tmp101 = tl.where(tmp82, tmp83, tmp100)
# (interior of triton_poi_fused_copy_zeros_4: range tests continue, four
# source pointers per group, pointer order reversed within each group)
    # x1 == 20 -> in_ptr17, 19 -> in_ptr18, 18 -> in_ptr19, 17 -> in_ptr20
    tmp102 = tl.full([1], 20, tl.int64)
    tmp103 = tmp0 >= tmp102
    tmp104 = tl.full([1], 21, tl.int64)
    tmp105 = tmp0 < tmp104
    tmp106 = tmp103 & tmp105
    tmp107 = tl.load(in_ptr17 + (x0 + 4 * x2), tmp106 & xmask, eviction_policy='evict_last', other=0.0)
    tmp108 = tl.full([1], 19, tl.int64)
    tmp109 = tmp0 >= tmp108
    tmp110 = tmp0 < tmp102
    tmp111 = tmp109 & tmp110
    tmp112 = tl.load(in_ptr18 + (x0 + 4 * x2), tmp111 & xmask, eviction_policy='evict_last', other=0.0)
    tmp113 = tl.full([1], 18, tl.int64)
    tmp114 = tmp0 >= tmp113
    tmp115 = tmp0 < tmp108
    tmp116 = tmp114 & tmp115
    tmp117 = tl.load(in_ptr19 + (x0 + 4 * x2), tmp116 & xmask, eviction_policy='evict_last', other=0.0)
    tmp118 = tmp0 >= tmp80
    tmp119 = tmp0 < tmp113
    tmp120 = tmp118 & tmp119
    tmp121 = tl.load(in_ptr20 + (x0 + 4 * x2), tmp120 & xmask, eviction_policy='evict_last', other=0.0)
    tmp122 = tl.where(tmp120, tmp121, tmp101)
    tmp123 = tl.where(tmp116, tmp117, tmp122)
    tmp124 = tl.where(tmp111, tmp112, tmp123)
    tmp125 = tl.where(tmp106, tmp107, tmp124)
    # x1 == 24 -> in_ptr21, 23 -> in_ptr22, 22 -> in_ptr23, 21 -> in_ptr24
    tmp126 = tl.full([1], 24, tl.int64)
    tmp127 = tmp0 >= tmp126
    tmp128 = tl.full([1], 25, tl.int64)
    tmp129 = tmp0 < tmp128
    tmp130 = tmp127 & tmp129
    tmp131 = tl.load(in_ptr21 + (x0 + 4 * x2), tmp130 & xmask, eviction_policy='evict_last', other=0.0)
    tmp132 = tl.full([1], 23, tl.int64)
    tmp133 = tmp0 >= tmp132
    tmp134 = tmp0 < tmp126
    tmp135 = tmp133 & tmp134
    tmp136 = tl.load(in_ptr22 + (x0 + 4 * x2), tmp135 & xmask, eviction_policy='evict_last', other=0.0)
    tmp137 = tl.full([1], 22, tl.int64)
    tmp138 = tmp0 >= tmp137
    tmp139 = tmp0 < tmp132
    tmp140 = tmp138 & tmp139
    tmp141 = tl.load(in_ptr23 + (x0 + 4 * x2), tmp140 & xmask, eviction_policy='evict_last', other=0.0)
    tmp142 = tmp0 >= tmp104
    tmp143 = tmp0 < tmp137
    tmp144 = tmp142 & tmp143
    tmp145 = tl.load(in_ptr24 + (x0 + 4 * x2), tmp144 & xmask, eviction_policy='evict_last', other=0.0)
    tmp146 = tl.where(tmp144, tmp145, tmp125)
    tmp147 = tl.where(tmp140, tmp141, tmp146)
    tmp148 = tl.where(tmp135, tmp136, tmp147)
    tmp149 = tl.where(tmp130, tmp131, tmp148)
    # x1 == 28 -> in_ptr25, 27 -> in_ptr26, 26 -> in_ptr27, 25 -> in_ptr28
    tmp150 = tl.full([1], 28, tl.int64)
    tmp151 = tmp0 >= tmp150
    tmp152 = tl.full([1], 29, tl.int64)
    tmp153 = tmp0 < tmp152
    tmp154 = tmp151 & tmp153
    tmp155 = tl.load(in_ptr25 + (x0 + 4 * x2), tmp154 & xmask, eviction_policy='evict_last', other=0.0)
    tmp156 = tl.full([1], 27, tl.int64)
    tmp157 = tmp0 >= tmp156
    tmp158 = tmp0 < tmp150
    tmp159 = tmp157 & tmp158
    tmp160 = tl.load(in_ptr26 + (x0 + 4 * x2), tmp159 & xmask, eviction_policy='evict_last', other=0.0)
    tmp161 = tl.full([1], 26, tl.int64)
    tmp162 = tmp0 >= tmp161
    tmp163 = tmp0 < tmp156
    tmp164 = tmp162 & tmp163
    tmp165 = tl.load(in_ptr27 + (x0 + 4 * x2), tmp164 & xmask, eviction_policy='evict_last', other=0.0)
    tmp166 = tmp0 >= tmp128
    tmp167 = tmp0 < tmp161
    tmp168 = tmp166 & tmp167
    tmp169 = tl.load(in_ptr28 + (x0 + 4 * x2), tmp168 & xmask, eviction_policy='evict_last', other=0.0)
    tmp170 = tl.where(tmp168, tmp169, tmp149)
    tmp171 = tl.where(tmp164, tmp165, tmp170)
    tmp172 = tl.where(tmp159, tmp160, tmp171)
    tmp173 = tl.where(tmp154, tmp155, tmp172)
    # x1 == 32 -> in_ptr29, 31 -> in_ptr30, 30 -> in_ptr31, 29 -> in_ptr32
    tmp174 = tl.full([1], 32, tl.int64)
    tmp175 = tmp0 >= tmp174
    tmp176 = tl.full([1], 33, tl.int64)
    tmp177 = tmp0 < tmp176
    tmp178 = tmp175 & tmp177
    tmp179 = tl.load(in_ptr29 + (x0 + 4 * x2), tmp178 & xmask, eviction_policy='evict_last', other=0.0)
    tmp180 = tl.full([1], 31, tl.int64)
    tmp181 = tmp0 >= tmp180
    tmp182 = tmp0 < tmp174
    tmp183 = tmp181 & tmp182
    tmp184 = tl.load(in_ptr30 + (x0 + 4 * x2), tmp183 & xmask, eviction_policy='evict_last', other=0.0)
    tmp185 = tl.full([1], 30, tl.int64)
    tmp186 = tmp0 >= tmp185
    tmp187 = tmp0 < tmp180
    tmp188 = tmp186 & tmp187
    tmp189 = tl.load(in_ptr31 + (x0 + 4 * x2), tmp188 & xmask, eviction_policy='evict_last', other=0.0)
    tmp190 = tmp0 >= tmp152
    tmp191 = tmp0 < tmp185
    tmp192 = tmp190 & tmp191
    tmp193 = tl.load(in_ptr32 + (x0 + 4 * x2), tmp192 & xmask, eviction_policy='evict_last', other=0.0)
    tmp194 = tl.where(tmp192, tmp193, tmp173)
    tmp195 = tl.where(tmp188, tmp189, tmp194)
    tmp196 = tl.where(tmp183, tmp184, tmp195)
    tmp197 = tl.where(tmp178, tmp179, tmp196)
    # x1 == 36 -> in_ptr33, 35 -> in_ptr34, 34 -> in_ptr35, 33 -> in_ptr36
    tmp198 = tl.full([1], 36, tl.int64)
    tmp199 = tmp0 >= tmp198
    tmp200 = tl.full([1], 37, tl.int64)
    tmp201 = tmp0 < tmp200
    tmp202 = tmp199 & tmp201
    tmp203 = tl.load(in_ptr33 + (x0 + 4 * x2), tmp202 & xmask, eviction_policy='evict_last', other=0.0)
    tmp204 = tl.full([1], 35, tl.int64)
    tmp205 = tmp0 >= tmp204
    tmp206 = tmp0 < tmp198
    tmp207 = tmp205 & tmp206
    tmp208 = tl.load(in_ptr34 + (x0 + 4 * x2), tmp207 & xmask, eviction_policy='evict_last', other=0.0)
    tmp209 = tl.full([1], 34, tl.int64)
    tmp210 = tmp0 >= tmp209
    tmp211 = tmp0 < tmp204
    tmp212 = tmp210 & tmp211
    tmp213 = tl.load(in_ptr35 + (x0 + 4 * x2), tmp212 & xmask, eviction_policy='evict_last', other=0.0)
    tmp214 = tmp0 >= tmp176
    tmp215 = tmp0 < tmp209
    tmp216 = tmp214 & tmp215
    tmp217 = tl.load(in_ptr36 + (x0 + 4 * x2), tmp216 & xmask, eviction_policy='evict_last', other=0.0)
    tmp218 = tl.where(tmp216, tmp217, tmp197)
    tmp219 = tl.where(tmp212, tmp213, tmp218)
    tmp220 = tl.where(tmp207, tmp208, tmp219)
    tmp221 = tl.where(tmp202, tmp203, tmp220)
    # x1 == 40 -> in_ptr37, 39 -> in_ptr38, 38 -> in_ptr39, 37 -> in_ptr40
    tmp222 = tl.full([1], 40, tl.int64)
    tmp223 = tmp0 >= tmp222
    tmp224 = tl.full([1], 41, tl.int64)
    tmp225 = tmp0 < tmp224
    tmp226 = tmp223 & tmp225
    tmp227 = tl.load(in_ptr37 + (x0 + 4 * x2), tmp226 & xmask, eviction_policy='evict_last', other=0.0)
    tmp228 = tl.full([1], 39, tl.int64)
    tmp229 = tmp0 >= tmp228
    tmp230 = tmp0 < tmp222
    tmp231 = tmp229 & tmp230
    tmp232 = tl.load(in_ptr38 + (x0 + 4 * x2), tmp231 & xmask, eviction_policy='evict_last', other=0.0)
    tmp233 = tl.full([1], 38, tl.int64)
    tmp234 = tmp0 >= tmp233
    tmp235 = tmp0 < tmp228
    tmp236 = tmp234 & tmp235
    tmp237 = tl.load(in_ptr39 + (x0 + 4 * x2), tmp236 & xmask, eviction_policy='evict_last', other=0.0)
    tmp238 = tmp0 >= tmp200
    tmp239 = tmp0 < tmp233
    tmp240 = tmp238 & tmp239
    tmp241 = tl.load(in_ptr40 + (x0 + 4 * x2), tmp240 & xmask, eviction_policy='evict_last', other=0.0)
    tmp242 = tl.where(tmp240, tmp241, tmp221)
    tmp243 = tl.where(tmp236, tmp237, tmp242)
    tmp244 = tl.where(tmp231, tmp232, tmp243)
    tmp245 = tl.where(tmp226, tmp227, tmp244)
    # x1 == 44 -> in_ptr41, 43 -> in_ptr42, 42 -> in_ptr43, 41 -> in_ptr44
    tmp246 = tl.full([1], 44, tl.int64)
    tmp247 = tmp0 >= tmp246
    tmp248 = tl.full([1], 45, tl.int64)
    tmp249 = tmp0 < tmp248
    tmp250 = tmp247 & tmp249
    tmp251 = tl.load(in_ptr41 + (x0 + 4 * x2), tmp250 & xmask, eviction_policy='evict_last', other=0.0)
    tmp252 = tl.full([1], 43, tl.int64)
    tmp253 = tmp0 >= tmp252
    tmp254 = tmp0 < tmp246
    tmp255 = tmp253 & tmp254
    tmp256 = tl.load(in_ptr42 + (x0 + 4 * x2), tmp255 & xmask, eviction_policy='evict_last', other=0.0)
    tmp257 = tl.full([1], 42, tl.int64)
    tmp258 = tmp0 >= tmp257
    tmp259 = tmp0 < tmp252
    tmp260 = tmp258 & tmp259
    tmp261 = tl.load(in_ptr43 + (x0 + 4 * x2), tmp260 & xmask, eviction_policy='evict_last', other=0.0)
    tmp262 = tmp0 >= tmp224
    tmp263 = tmp0 < tmp257
    tmp264 = tmp262 & tmp263
    tmp265 = tl.load(in_ptr44 + (x0 + 4 * x2), tmp264 & xmask, eviction_policy='evict_last', other=0.0)
    tmp266 = tl.where(tmp264, tmp265, tmp245)
    tmp267 = tl.where(tmp260, tmp261, tmp266)
    tmp268 = tl.where(tmp255, tmp256, tmp267)
    tmp269 = tl.where(tmp250, tmp251, tmp268)
    # x1 == 48 -> in_ptr45, 47 -> in_ptr46, 46 -> in_ptr47, 45 -> in_ptr48
    tmp270 = tl.full([1], 48, tl.int64)
    tmp271 = tmp0 >= tmp270
    tmp272 = tl.full([1], 49, tl.int64)
    tmp273 = tmp0 < tmp272
    tmp274 = tmp271 & tmp273
    tmp275 = tl.load(in_ptr45 + (x0 + 4 * x2), tmp274 & xmask, eviction_policy='evict_last', other=0.0)
    tmp276 = tl.full([1], 47, tl.int64)
    tmp277 = tmp0 >= tmp276
    tmp278 = tmp0 < tmp270
    tmp279 = tmp277 & tmp278
    tmp280 = tl.load(in_ptr46 + (x0 + 4 * x2), tmp279 & xmask, eviction_policy='evict_last', other=0.0)
    tmp281 = tl.full([1], 46, tl.int64)
    tmp282 = tmp0 >= tmp281
    tmp283 = tmp0 < tmp276
    tmp284 = tmp282 & tmp283
    tmp285 = tl.load(in_ptr47 + (x0 + 4 * x2), tmp284 & xmask, eviction_policy='evict_last', other=0.0)
    tmp286 = tmp0 >= tmp248
    tmp287 = tmp0 < tmp281
    tmp288 = tmp286 & tmp287
    tmp289 = tl.load(in_ptr48 + (x0 + 4 * x2), tmp288 & xmask, eviction_policy='evict_last', other=0.0)
    tmp290 = tl.where(tmp288, tmp289, tmp269)
    tmp291 = tl.where(tmp284, tmp285, tmp290)
    tmp292 = tl.where(tmp279, tmp280, tmp291)
    tmp293 = tl.where(tmp274, tmp275, tmp292)
    # x1 == 52 -> in_ptr49, 51 -> in_ptr50, 50 -> in_ptr51, 49 -> in_ptr52
    tmp294 = tl.full([1], 52, tl.int64)
    tmp295 = tmp0 >= tmp294
    tmp296 = tl.full([1], 53, tl.int64)
    tmp297 = tmp0 < tmp296
    tmp298 = tmp295 & tmp297
    tmp299 = tl.load(in_ptr49 + (x0 + 4 * x2), tmp298 & xmask, eviction_policy='evict_last', other=0.0)
    tmp300 = tl.full([1], 51, tl.int64)
    tmp301 = tmp0 >= tmp300
    tmp302 = tmp0 < tmp294
    tmp303 = tmp301 & tmp302
    tmp304 = tl.load(in_ptr50 + (x0 + 4 * x2), tmp303 & xmask, eviction_policy='evict_last', other=0.0)
    tmp305 = tl.full([1], 50, tl.int64)
    tmp306 = tmp0 >= tmp305
    tmp307 = tmp0 < tmp300
    tmp308 = tmp306 & tmp307
    tmp309 = tl.load(in_ptr51 + (x0 + 4 * x2), tmp308 & xmask, eviction_policy='evict_last', other=0.0)
    tmp310 = tmp0 >= tmp272
    tmp311 = tmp0 < tmp305
    tmp312 = tmp310 & tmp311
    tmp313 = tl.load(in_ptr52 + (x0 + 4 * x2), tmp312 & xmask, eviction_policy='evict_last', other=0.0)
    tmp314 = tl.where(tmp312, tmp313, tmp293)
    tmp315 = tl.where(tmp308, tmp309, tmp314)
    tmp316 = tl.where(tmp303, tmp304, tmp315)
    tmp317 = tl.where(tmp298, tmp299, tmp316)
    # x1 == 56 -> in_ptr53, 55 -> in_ptr54, 54 -> in_ptr55, 53 -> in_ptr56
    tmp318 = tl.full([1], 56, tl.int64)
    tmp319 = tmp0 >= tmp318
    tmp320 = tl.full([1], 57, tl.int64)
    tmp321 = tmp0 < tmp320
    tmp322 = tmp319 & tmp321
    tmp323 = tl.load(in_ptr53 + (x0 + 4 * x2), tmp322 & xmask, eviction_policy='evict_last', other=0.0)
    tmp324 = tl.full([1], 55, tl.int64)
    tmp325 = tmp0 >= tmp324
    tmp326 = tmp0 < tmp318
    tmp327 = tmp325 & tmp326
    tmp328 = tl.load(in_ptr54 + (x0 + 4 * x2), tmp327 & xmask, eviction_policy='evict_last', other=0.0)
    tmp329 = tl.full([1], 54, tl.int64)
    tmp330 = tmp0 >= tmp329
    tmp331 = tmp0 < tmp324
    tmp332 = tmp330 & tmp331
    tmp333 = tl.load(in_ptr55 + (x0 + 4 * x2), tmp332 & xmask, eviction_policy='evict_last', other=0.0)
    tmp334 = tmp0 >= tmp296
    tmp335 = tmp0 < tmp329
    tmp336 = tmp334 & tmp335
    tmp337 = tl.load(in_ptr56 + (x0 + 4 * x2), tmp336 & xmask, eviction_policy='evict_last', other=0.0)
    tmp338 = tl.where(tmp336, tmp337, tmp317)
    tmp339 = tl.where(tmp332, tmp333, tmp338)
    tmp340 = tl.where(tmp327, tmp328, tmp339)
    tmp341 = tl.where(tmp322, tmp323, tmp340)
# (tail of triton_poi_fused_copy_zeros_4: last two selection groups, then the
# final in-place store of the fully assembled value)
    # x1 == 60 -> in_ptr57, 59 -> in_ptr58, 58 -> in_ptr59, 57 -> in_ptr60
    tmp342 = tl.full([1], 60, tl.int64)
    tmp343 = tmp0 >= tmp342
    tmp344 = tl.full([1], 61, tl.int64)
    tmp345 = tmp0 < tmp344
    tmp346 = tmp343 & tmp345
    tmp347 = tl.load(in_ptr57 + (x0 + 4 * x2), tmp346 & xmask, eviction_policy='evict_last', other=0.0)
    tmp348 = tl.full([1], 59, tl.int64)
    tmp349 = tmp0 >= tmp348
    tmp350 = tmp0 < tmp342
    tmp351 = tmp349 & tmp350
    tmp352 = tl.load(in_ptr58 + (x0 + 4 * x2), tmp351 & xmask, eviction_policy='evict_last', other=0.0)
    tmp353 = tl.full([1], 58, tl.int64)
    tmp354 = tmp0 >= tmp353
    tmp355 = tmp0 < tmp348
    tmp356 = tmp354 & tmp355
    tmp357 = tl.load(in_ptr59 + (x0 + 4 * x2), tmp356 & xmask, eviction_policy='evict_last', other=0.0)
    tmp358 = tmp0 >= tmp320
    tmp359 = tmp0 < tmp353
    tmp360 = tmp358 & tmp359
    tmp361 = tl.load(in_ptr60 + (x0 + 4 * x2), tmp360 & xmask, eviction_policy='evict_last', other=0.0)
    tmp362 = tl.where(tmp360, tmp361, tmp341)
    tmp363 = tl.where(tmp356, tmp357, tmp362)
    tmp364 = tl.where(tmp351, tmp352, tmp363)
    tmp365 = tl.where(tmp346, tmp347, tmp364)
    # x1 == 63 -> in_ptr61, 62 -> in_ptr62, 61 -> in_ptr63 (63 has no upper test)
    tmp366 = tl.full([1], 63, tl.int64)
    tmp367 = tmp0 >= tmp366
    tmp368 = tl.load(in_ptr61 + (x0 + 4 * x2), tmp367 & xmask, eviction_policy='evict_last', other=0.0)
    tmp369 = tl.full([1], 62, tl.int64)
    tmp370 = tmp0 >= tmp369
    tmp371 = tmp0 < tmp366
    tmp372 = tmp370 & tmp371
    tmp373 = tl.load(in_ptr62 + (x0 + 4 * x2), tmp372 & xmask, eviction_policy='evict_last', other=0.0)
    tmp374 = tmp0 >= tmp344
    tmp375 = tmp0 < tmp369
    tmp376 = tmp374 & tmp375
    tmp377 = tl.load(in_ptr63 + (x0 + 4 * x2), tmp376 & xmask, eviction_policy='evict_last', other=0.0)
    tmp378 = tl.where(tmp376, tmp377, tmp365)
    tmp379 = tl.where(tmp372, tmp373, tmp378)
    tmp380 = tl.where(tmp367, tmp368, tmp379)
    tl.store(in_out_ptr0 + x3, tmp380, xmask)


@triton.jit
def triton_red_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
    out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    # Two-pass fused normalization over a buffer of 4 rows x 256 elements.
    # Pass 1: divide each consecutive group of 4 values (r2 = rindex // 4
    # picks the group) by its L2 norm clamped to 1e-12, store the result to
    # out_ptr0, and accumulate the squared normalized values per row.
    # Between the passes: per-row global norm = sqrt(sum) -> in_out_ptr0.
    # Pass 2: divide the intra-normalized values by the clamped global norm
    # -> out_ptr1 (the final flattened descriptor).
    # NOTE(review): equivalent to F.normalize(..., eps=1e-12) applied twice
    # (per-group, then per-row) -- confirm against the source module.
    xnumel = 4
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r3 = rindex
        r2 = rindex // 4
        tmp0 = tl.load(in_ptr0 + (r3 + 256 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0)
        # the four members of this element's group, for the group norm
        tmp1 = tl.load(in_ptr0 + (4 * r2 + 256 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tl.load(in_ptr0 + (1 + 4 * r2 + 256 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (2 + 4 * r2 + 256 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp9 = tl.load(in_ptr0 + (3 + 4 * r2 + 256 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tmp1 * tmp1
        tmp4 = tmp3 * tmp3
        tmp5 = tmp2 + tmp4
        tmp7 = tmp6 * tmp6
        tmp8 = tmp5 + tmp7
        tmp10 = tmp9 * tmp9
        tmp11 = tmp8 + tmp10
        tmp12 = libdevice.sqrt(tmp11)
        tmp13 = 1e-12
        tmp14 = triton_helpers.maximum(tmp12, tmp13)
        tmp15 = tmp0 / tmp14
        tmp16 = tmp15 * tmp15
        tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
        tmp19 = _tmp18 + tmp17
        _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
        tl.store(out_ptr0 + (r3 + 256 * x0), tmp15, rmask & xmask)
    tmp18 = tl.sum(_tmp18, 1)[:, None]
    tmp20 = libdevice.sqrt(tmp18)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    # second pass re-reads the intra-normalized values written above
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r3 = rindex
        tmp21 = tl.load(out_ptr0 + (r3 + 256 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp22 = 1e-12
        tmp23 = triton_helpers.maximum(tmp20, tmp22)
        tmp24 = tmp21 / tmp23
        tl.store(out_ptr1 + (r3 + 256 * x0), tmp24, rmask & xmask)


def call(args):
    # Host-side entry point generated by Inductor: unpacks the parameters,
    # checks their expected shapes/strides, then launches the kernels above.
    # (body continues past this chunk)
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (64, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32) buf2 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(64)](buf0, buf1, buf2, 64, 64, XBLOCK=32, num_warps=8, num_stages=1) buf4 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf6 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf8 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf10 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf13 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf15 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf17 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf19 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf22 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf24 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf26 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf28 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf31 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf33 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf35 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf37 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf40 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf42 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. 
float32) buf44 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf46 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf49 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf51 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf53 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf55 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf58 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf60 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf62 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf64 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf3 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf41 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) 
buf45 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf50 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf52 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf59 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_per_fused_mul_sub_sum_1[grid(16)](primals_1, primals_3, buf0, buf1, buf2, buf4, buf6, buf8, buf10, buf13, buf15, buf17, buf19, buf22, buf24, buf26, buf28, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf49, buf51, buf53, buf55, buf58, buf60, buf62, buf64, buf3, buf5, buf7, buf9, buf11, buf14, buf16, buf18, buf20, buf23, buf25, buf27, buf29, buf32, buf34, buf36, buf38, buf41, buf43, buf45, buf47, buf50, buf52, buf54, buf56, buf59, buf61, buf63, buf65, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf67 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf69 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf71 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf73 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf76 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf78 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf80 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf82 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf85 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf87 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf89 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. 
float32) buf91 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf94 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf96 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf98 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf100 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf103 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf105 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf107 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf109 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf112 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf114 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf116 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf118 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf121 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf123 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf125 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf127 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. 
float32) buf68 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf70 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf77 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf81 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf86 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf95 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf104 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf113 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf122 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_per_fused_mul_sub_sum_2[grid(16)](primals_1, primals_3, buf0, buf1, buf2, buf67, buf69, buf71, buf73, buf76, buf78, buf80, buf82, buf85, buf87, buf89, 
buf91, buf94, buf96, buf98, buf100, buf103, buf105, buf107, buf109, buf112, buf114, buf116, buf118, buf121, buf123, buf125, buf127, buf68, buf70, buf72, buf74, buf77, buf79, buf81, buf83, buf86, buf88, buf90, buf92, buf95, buf97, buf99, buf101, buf104, buf106, buf108, buf110, buf113, buf115, buf117, buf119, buf122, buf124, buf126, buf128, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf130 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf132 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf134 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf136 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf139 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf141 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf143 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch. float32) buf131 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf133 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf140 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_per_fused_mul_sub_sum_3[grid(16)](primals_1, primals_3, buf0, buf1, buf2, buf130, buf132, buf134, buf136, buf139, buf141, buf143, buf131, buf133, buf135, buf137, buf140, buf142, buf144, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf12 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32) buf21 = buf12 del buf12 buf30 = buf21 del buf21 buf39 = buf30 del buf30 buf48 = buf39 del buf39 buf57 = buf48 del buf48 buf66 = buf57 del buf57 buf75 = buf66 del buf66 buf84 = buf75 del buf75 buf93 = buf84 del buf84 buf102 = buf93 del buf93 buf111 = buf102 del buf102 buf120 = buf111 del buf111 buf129 = 
buf120 del buf120 buf138 = buf129 del buf129 buf145 = buf138 del buf138 triton_poi_fused_copy_zeros_4[grid(1024)](buf145, buf11, buf9, buf7, buf5, buf3, buf20, buf18, buf16, buf14, buf29, buf27, buf25, buf23, buf38, buf36, buf34, buf32, buf47, buf45, buf43, buf41, buf56, buf54, buf52, buf50, buf65, buf63, buf61, buf59, buf74, buf72, buf70, buf68, buf83, buf81, buf79, buf77, buf92, buf90, buf88, buf86, buf101, buf99, buf97, buf95, buf110, buf108, buf106, buf104, buf119, buf117, buf115, buf113, buf128, buf126, buf124, buf122, buf137, buf135, buf133, buf131, buf144, buf142, buf140, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf101 del buf104 del buf106 del buf108 del buf11 del buf110 del buf113 del buf115 del buf117 del buf119 del buf122 del buf124 del buf126 del buf128 del buf131 del buf133 del buf135 del buf137 del buf14 del buf140 del buf142 del buf144 del buf16 del buf18 del buf20 del buf23 del buf25 del buf27 del buf29 del buf3 del buf32 del buf34 del buf36 del buf38 del buf41 del buf43 del buf45 del buf47 del buf5 del buf50 del buf52 del buf54 del buf56 del buf59 del buf61 del buf63 del buf65 del buf68 del buf7 del buf70 del buf72 del buf74 del buf77 del buf79 del buf81 del buf83 del buf86 del buf88 del buf9 del buf90 del buf92 del buf95 del buf97 del buf99 buf146 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32) buf147 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf148 = reinterpret_tensor(buf147, (4, 1), (1, 1), 0) del buf147 buf149 = empty_strided_cuda((4, 256), (256, 1), torch.float32) triton_red_fused_div_linalg_vector_norm_5[grid(4)](buf148, buf145, buf146, buf149, 4, 256, XBLOCK=1, RBLOCK=256, num_warps=2, num_stages=1) del buf146 return (buf149, primals_1, primals_2, buf0, buf1, buf2, reinterpret_tensor(primals_3, (1, 4), (4, 1), 0), buf4, buf6, buf8, buf10, buf13, buf15, buf17, buf19, buf22, buf24, buf26, buf28, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf49, buf51, buf53, buf55, buf58, buf60, buf62, buf64, buf67, 
buf69, buf71, buf73, buf76, buf78, buf80, buf82, buf85, buf87, buf89, buf91, buf94, buf96, buf98, buf100, buf103, buf105, buf107, buf109, buf112, buf114, buf116, buf118, buf121, buf123, buf125, buf127, buf130, buf132, buf134, buf136, buf139, buf141, buf143, buf145, buf148) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, dim, num_clusters=64): """ Args: dim : int Dimension of descriptors num_clusters : int The number of clusters """ super(NetVLADNew, self).__init__() self.num_clusters = num_clusters self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=False ) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) def init_params(self, clsts, traindescs): clsts_assign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True) dots = np.dot(clsts_assign, traindescs.T) dots.sort(0) dots = dots[::-1, :] alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item() self.centroids = nn.Parameter(torch.from_numpy(clsts)) self.conv.weight = nn.Parameter(torch.from_numpy(alpha * clsts_assign).unsqueeze(2).unsqueeze(3)) self.conv.bias = None def forward(self, input_0): primals_3 = self.centroids primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lulor/project_vg
NetVLAD
false
7,269
[ "MIT" ]
1
27b0c3b3038c5a666dde516a0a265ae8ddf2059f
https://github.com/lulor/project_vg/tree/27b0c3b3038c5a666dde516a0a265ae8ddf2059f
DuelingNet
import torch from torch import nn import torch.nn.functional as F class DuelingNet(nn.Module): def __init__(self, n_in, n_mid, n_out): super(DuelingNet, self).__init__() self.fc1 = nn.Linear(n_in, n_mid) self.fc2 = nn.Linear(n_mid, n_mid) self.fc3_adv = nn.Linear(n_mid, n_out) self.fc3_val = nn.Linear(n_mid, 1) def forward(self, x): h1 = F.relu(self.fc1(x)) h2 = F.relu(self.fc2(h1)) adv = self.fc3_adv(h2) val = self.fc3_val(h2).expand(-1, adv.size(1)) output = val + adv - adv.mean(1, keepdim=True).expand(-1, adv.size(1)) return output def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_in': 4, 'n_mid': 4, 'n_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp12 = tmp10 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = tmp5 - tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (4, 1), (1, 4 ), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_sub_1[grid(16)](buf5, primals_9, buf4, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del buf5 del primals_9 return buf6, primals_3, buf1, buf3, primals_8, primals_6, primals_4 class DuelingNetNew(nn.Module): def __init__(self, n_in, n_mid, n_out): super(DuelingNetNew, self).__init__() self.fc1 = nn.Linear(n_in, n_mid) self.fc2 = nn.Linear(n_mid, n_mid) self.fc3_adv = nn.Linear(n_mid, n_out) self.fc3_val = nn.Linear(n_mid, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = 
self.fc1.bias primals_3 = self.fc2.weight primals_5 = self.fc2.bias primals_4 = self.fc3_adv.weight primals_7 = self.fc3_adv.bias primals_8 = self.fc3_val.weight primals_9 = self.fc3_val.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
moriaki3193/Torch26
DuelingNet
false
7,271
[ "MIT" ]
1
fb75f6b6bb07c63fedb03fad7b647837eb40db2e
https://github.com/moriaki3193/Torch26/tree/fb75f6b6bb07c63fedb03fad7b647837eb40db2e
AveragePooling
import torch import torch.nn as nn class AveragePooling(nn.Module): def __init__(self): super(AveragePooling, self).__init__() """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, x, x_mask): """ x_output: num_items x input_size x 1 --> num_items x input_size """ x_now = x.clone() empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x_now) x_now.data.masked_fill_(empty_mask.data, 0) x_sum = torch.sum(x_now, 1) x_num = torch.sum(x_mask.eq(1).float(), 1).unsqueeze(1).expand_as(x_sum ) x_num = torch.clamp(x_num, min=1) return x_sum / x_num def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_masked_fill_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask) tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask) tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tl.where(tmp2, tmp1, tmp3) tmp6 = tmp5 == tmp1 tmp8 = tl.where(tmp6, tmp1, tmp7) tmp9 = tmp4 + tmp8 tmp11 = tmp10 == tmp1 tmp13 = tl.where(tmp11, tmp1, tmp12) tmp14 = tmp9 + tmp13 tmp16 = tmp15 == tmp1 tmp18 = tl.where(tmp16, tmp1, tmp17) tmp19 = tmp14 + tmp18 tmp20 = 1.0 tmp21 = tmp0 == tmp20 tmp22 = tmp21.to(tl.float32) tmp23 = tmp5 == tmp20 tmp24 = tmp23.to(tl.float32) tmp25 = tmp22 + tmp24 tmp26 = tmp10 == tmp20 tmp27 = tmp26.to(tl.float32) tmp28 = tmp25 + tmp27 tmp29 = tmp15 == tmp20 tmp30 = tmp29.to(tl.float32) tmp31 = tmp28 + tmp30 tmp32 = triton_helpers.maximum(tmp31, tmp20) tmp33 = tmp19 / tmp32 tl.store(out_ptr0 + x4, tmp33, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_masked_fill_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class AveragePoolingNew(nn.Module): def __init__(self): super(AveragePoolingNew, self).__init__() """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mpandeydev/SDnetmod
AveragePooling
false
7,272
[ "MIT" ]
1
c8cdf6150e3cd28330359a7d81df236729522a69
https://github.com/mpandeydev/SDnetmod/tree/c8cdf6150e3cd28330359a7d81df236729522a69
SinenetComponent
import torch class SinenetComponent(torch.nn.Module): def __init__(self, time_len, i): super().__init__() self.time_len = time_len self.i = i self.t_wav = 1.0 / 16000 self.log_f_mean = 5.02654 self.log_f_std = 0.373288 self.a = torch.nn.Parameter(torch.Tensor(1)) self.phi = torch.nn.Parameter(torch.Tensor(1)) def forward(self, x, f, t): i_f = torch.mul(self.i, f) i_f_t = torch.mul(i_f, t) deg = torch.add(i_f_t, self.phi) s = torch.sin(deg) self.W = torch.mul(self.a, s) h_SBT = torch.mul(self.W, x) h_SB = torch.sum(h_SBT, dim=-1, keepdim=False) return h_SB def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'time_len': 4, 'i': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_sin_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp5 = tl.load(in_ptr2 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr3 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp1 = 4.0 tmp2 = tmp1 * tmp0 tmp4 = tmp2 * tmp3 tmp7 = tmp4 + tmp6 tmp10 = tl_math.sin(tmp7) tmp11 = tmp9 * tmp10 tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + 
tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sin_0[grid(256)](primals_1, primals_2, primals_3, primals_4, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(64)](buf1, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, buf1, primals_4, primals_5, buf0 class SinenetComponentNew(torch.nn.Module): def __init__(self, time_len, i): super().__init__() self.time_len = time_len self.i = i self.t_wav = 1.0 / 16000 self.log_f_mean = 5.02654 self.log_f_std = 0.373288 self.a = torch.nn.Parameter(torch.Tensor(1)) self.phi = torch.nn.Parameter(torch.Tensor(1)) def forward(self, input_0, input_1, input_2): primals_3 = self.a primals_4 = self.phi primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
moquan/22_Nov_2018
SinenetComponent
false
7,273
[ "MIT" ]
1
eaa81bf5050d74612fe1322abcdb26a0a919e976
https://github.com/moquan/22_Nov_2018/tree/eaa81bf5050d74612fe1322abcdb26a0a919e976
Net3
import torch from torch import nn class Net3(nn.Module): """ Net3 is a neural network consisting of four hidden layers with sizes 400, 300, 300 and 70 """ layer_sizes = [400, 300, 300, 70] hidden1 = 400 hidden2 = 300 hidden3 = 300 hidden4 = 70 def __init__(self, input_size): super(Net3, self).__init__() self.fc1 = nn.Linear(input_size, self.hidden1) self.relu1 = nn.ReLU() self.fc2 = nn.Linear(self.hidden1, self.hidden2) self.relu2 = nn.ReLU() self.fc3 = nn.Linear(self.hidden2, self.hidden3) self.relu3 = nn.ReLU() self.fc4 = nn.Linear(self.hidden3, self.hidden4) self.relu4 = nn.ReLU() self.fc5 = nn.Linear(self.hidden4, 1) def forward(self, x): out = self.fc1(x) out = self.relu1(out) out = self.fc2(out) out = self.relu2(out) out = self.fc3(out) out = self.relu3(out) out = self.fc4(out) out = self.relu4(out) out = self.fc5(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 70 x2 = xindex % 1120 x3 = xindex // 1120 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1152 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300,), (1,)) assert_size_stride(primals_6, (300, 300), (300, 1)) assert_size_stride(primals_7, (300,), (1,)) assert_size_stride(primals_8, (70, 300), (300, 1)) assert_size_stride(primals_9, (70,), (1,)) assert_size_stride(primals_10, (1, 70), (70, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf15 = empty_strided_cuda((4, 4, 4, 400), 
(6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf15, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2, primals_5, buf3, buf14, 19200, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK =128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 300), ( 1, 300), 0), out=buf5) buf6 = buf3 del buf3 buf13 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf5, primals_7, buf6, buf13, 19200, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf7 = buf5 del buf5 triton_poi_fused_relu_view_2[grid(19200)](buf6, buf7, 19200, XBLOCK =128, num_warps=4, num_stages=1) del buf6 buf8 = empty_strided_cuda((64, 70), (70, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (300, 70), (1, 300), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 70), (1120, 280, 70, 1), 0) del buf8 buf12 = empty_strided_cuda((4, 4, 4, 70), (1152, 280, 70, 1), torch .bool) triton_poi_fused_relu_threshold_backward_3[grid(4480)](buf9, primals_9, buf12, 4480, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (64, 70), (70, 1), 0), reinterpret_tensor(primals_10, (70, 1), (1, 70), 0 ), alpha=1, beta=1, 
out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), buf4, buf7, reinterpret_tensor(buf9, (64, 70), (70, 1), 0 ), primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15 class Net3New(nn.Module): """ Net3 is a neural network consisting of four hidden layers with sizes 400, 300, 300 and 70 """ layer_sizes = [400, 300, 300, 70] hidden1 = 400 hidden2 = 300 hidden3 = 300 hidden4 = 70 def __init__(self, input_size): super(Net3New, self).__init__() self.fc1 = nn.Linear(input_size, self.hidden1) self.relu1 = nn.ReLU() self.fc2 = nn.Linear(self.hidden1, self.hidden2) self.relu2 = nn.ReLU() self.fc3 = nn.Linear(self.hidden2, self.hidden3) self.relu3 = nn.ReLU() self.fc4 = nn.Linear(self.hidden3, self.hidden4) self.relu4 = nn.ReLU() self.fc5 = nn.Linear(self.hidden4, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_10 = self.fc5.weight primals_11 = self.fc5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
moritzschaefer/pavooc
Net3
false
7,274
[ "MIT" ]
1
735f5455f9a95a5734436a24e2aa92cf600c91af
https://github.com/moritzschaefer/pavooc/tree/735f5455f9a95a5734436a24e2aa92cf600c91af
MaxPooling
import torch import torch.nn as nn class MaxPooling(nn.Module): def __init__(self): super(MaxPooling, self).__init__() self.MIN = -1000000.0 """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, x, x_mask): """ x_output: num_items x input_size x 1 --> num_items x input_size """ empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x) x_now = x.clone() x_now.data.masked_fill_(empty_mask.data, self.MIN) x_output = x_now.max(1)[0] x_output.data.masked_fill_(x_output.data.eq(self.MIN), 0) return x_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_masked_fill_max_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = -1000000.0 tmp5 = tl.where(tmp2, tmp4, tmp3) tmp7 = tmp6 == tmp1 tmp9 = tl.where(tmp7, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp12 = tmp11 == tmp1 tmp14 = tl.where(tmp12, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp17 = tmp16 == tmp1 tmp19 = tl.where(tmp17, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp20 == tmp4 tmp22 = tl.where(tmp21, tmp1, tmp20) tl.store(in_out_ptr0 + x2, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) 
triton_poi_fused_eq_masked_fill_max_0[grid(16)](buf1, arg0_1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class MaxPoolingNew(nn.Module): def __init__(self): super(MaxPoolingNew, self).__init__() self.MIN = -1000000.0 """ (item, subitem) can be (word, characters), or (sentence, words) x: num_items x max_subitem_size x input_size x_mask: num_items x max_subitem_size return num_items x input_size """ def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mpandeydev/SDnetmod
MaxPooling
false
7,275
[ "MIT" ]
1
c8cdf6150e3cd28330359a7d81df236729522a69
https://github.com/mpandeydev/SDnetmod/tree/c8cdf6150e3cd28330359a7d81df236729522a69
Actor
import torch import torch.nn.functional as F import torch.nn as nn class Actor(torch.nn.Module): def __init__(self, numObs, numActions): super(Actor, self).__init__() self.actor_input = nn.Linear(numObs, 32) self.actor_fc1 = nn.Linear(32, 32) self.actor_output = nn.Linear(32, numActions) def forward(self, x): x = F.relu(self.actor_input(x)) x = F.relu(self.actor_fc1(x)) logits = self.actor_output(x) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'numObs': 4, 'numActions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3, primals_5, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6 class ActorNew(torch.nn.Module): def __init__(self, numObs, numActions): super(ActorNew, self).__init__() self.actor_input = nn.Linear(numObs, 32) self.actor_fc1 = nn.Linear(32, 32) self.actor_output = nn.Linear(32, numActions) def forward(self, input_0): primals_1 = self.actor_input.weight primals_2 = self.actor_input.bias primals_4 = self.actor_fc1.weight primals_5 = self.actor_fc1.bias primals_6 = self.actor_output.weight primals_7 = self.actor_output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mpgussert/fundamentalRL
Actor
false
7,276
[ "MIT" ]
1
4f45436226e0823c21cac316dec8bbf1df697467
https://github.com/mpgussert/fundamentalRL/tree/4f45436226e0823c21cac316dec8bbf1df697467
Agent
import torch import torch.nn.functional as F import torch.nn as nn class Agent(torch.nn.Module): def __init__(self, numObs, numActions): super(Agent, self).__init__() self.critic_input = nn.Linear(numObs, 32) self.critic_fc1 = nn.Linear(32, 32) self.critic_output = nn.Linear(32, 1) self.actor_input = nn.Linear(numObs, 32) self.actor_fc1 = nn.Linear(32, 32) self.actor_output = nn.Linear(32, numActions) def forward(self, x): y = F.relu(self.actor_input(x)) y = F.relu(self.actor_fc1(y)) logits = self.actor_output(y) z = F.relu(self.critic_input(x)) z = F.relu(self.critic_fc1(z)) value = self.critic_output(z) return logits, value def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'numObs': 4, 'numActions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (32, 4), (4, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 32), (32, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (1, 32), (32, 1)) assert_size_stride(primals_13, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf14 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf14, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf13 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3, primals_5, buf13, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 32), (1, 4), 0), out=buf5) del primals_8 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf5 buf12 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf6, primals_9, buf12, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf7 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 32), (32, 1), 0), reinterpret_tensor(primals_10, (32, 32), (1, 32), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf7 buf11 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool ) 
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf8, primals_11, buf11, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 32), (32, 1), 0), reinterpret_tensor(primals_12, (32, 1), (1, 32), 0 ), alpha=1, beta=1, out=buf10) del primals_13 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf6, (64, 32), (32, 1), 0), reinterpret_tensor(buf8, (64, 32), (32, 1), 0 ), primals_12, buf11, primals_10, buf12, primals_6, buf13, primals_4, buf14 class AgentNew(torch.nn.Module): def __init__(self, numObs, numActions): super(AgentNew, self).__init__() self.critic_input = nn.Linear(numObs, 32) self.critic_fc1 = nn.Linear(32, 32) self.critic_output = nn.Linear(32, 1) self.actor_input = nn.Linear(numObs, 32) self.actor_fc1 = nn.Linear(32, 32) self.actor_output = nn.Linear(32, numActions) def forward(self, input_0): primals_1 = self.critic_input.weight primals_2 = self.critic_input.bias primals_4 = self.critic_fc1.weight primals_5 = self.critic_fc1.bias primals_12 = self.critic_output.weight primals_13 = self.critic_output.bias primals_8 = self.actor_input.weight primals_9 = self.actor_input.bias primals_10 = self.actor_fc1.weight primals_11 = self.actor_fc1.bias primals_6 = self.actor_output.weight primals_7 = self.actor_output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
mpgussert/fundamentalRL
Agent
false
7,277
[ "MIT" ]
1
4f45436226e0823c21cac316dec8bbf1df697467
https://github.com/mpgussert/fundamentalRL/tree/4f45436226e0823c21cac316dec8bbf1df697467