Dataset schema (column name: type, observed length/value range):

entry_point: string, 1 to 65 chars
original_triton_python_code: string, 208 to 619k chars
optimised_triton_code: string, 1.15k to 275k chars
repo_name: string, 7 to 115 chars
module_name: string, 1 to 65 chars
synthetic: bool, 1 class
uuid: int64, 0 to 18.5k
licenses: list, 1 to 6 items
stars: int64, 0 to 19.8k
sha: string, 40 chars
repo_link: string, 72 to 180 chars
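The rows below follow this schema, in column order. As context, here is a minimal loading sketch, assuming the table is published as a Hugging Face-style dataset; the dataset id below is a placeholder (it is not stated in this dump) and the output file names are illustrative only:

# Hypothetical loading sketch: "user/triton-pairs-placeholder" is NOT the real
# dataset id; substitute the repository this dump was exported from.
from datasets import load_dataset

ds = load_dataset("user/triton-pairs-placeholder", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])

# The two code columns hold complete Python sources; write them to files so
# they can be imported and compared (see the harness sketch after the first row).
with open("original_mod.py", "w") as f:
    f.write(row["original_triton_python_code"])
with open("optimised_mod.py", "w") as f:
    f.write(row["optimised_triton_code"])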
entry_point: PositionwiseFeedForward
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__to_copy_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .float32) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 + tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp9.to(tl.float32) tmp12 = tmp10 + tmp11 tmp13 = tmp8 + tmp12 tmp15 = tmp14.to(tl.float32) tmp17 = tmp15 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + 
tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp5 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(256)](primals_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_2, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf3, primals_3, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf4 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_4, buf4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf5 = empty_strided_cuda((4,), (1,), torch.float16) triton_poi_fused__to_copy_3[grid(4)](primals_5, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float16) extern_kernels.addmm(buf5, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (1, 4), 0), alpha=1, beta= 1, out=buf6) del buf5 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](buf6, primals_1, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(256)](buf6, primals_1, buf7, buf8, primals_6, primals_7, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del buf8 del primals_7 return buf9, primals_1, primals_6, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, buf4, buf10 class 
PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
repo_name: bahducoup/factorized_training
module_name: PositionwiseFeedForward
synthetic: false
uuid: 12,156
licenses: [ "MIT" ]
stars: 0
sha: 0af38f16338a9bcfcc11091b1a6b75befd67f234
repo_link: https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
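Each row pairs an eager PyTorch module (original_triton_python_code) with the Inductor-generated Triton version of the same module (optimised_triton_code). A minimal sketch of checking that the pair above agrees numerically, assuming the two cells were saved as original_mod.py and optimised_mod.py (hypothetical file names, as in the loading sketch above), a CUDA device, and a PyTorch build whose private torch._inductor APIs match the generated code:

# Hypothetical comparison harness; file names and tolerances are illustrative.
import torch
from original_mod import PositionwiseFeedForward, get_inputs, get_init_inputs
from optimised_mod import PositionwiseFeedForwardNew

init_args, init_kwargs = get_init_inputs()
ref = PositionwiseFeedForward(*init_args, **init_kwargs).cuda()
opt = PositionwiseFeedForwardNew(*init_args, **init_kwargs).cuda()
opt.load_state_dict(ref.state_dict())  # same weights so outputs are comparable
ref.eval()
opt.eval()  # disable dropout for a deterministic check

inputs = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    out_ref = ref(*inputs)
    out_opt = opt(*inputs)

# The generated kernels mix fp16 matmuls with fp32 layer norm, so compare loosely.
print(torch.allclose(out_ref.float(), out_opt.float(), atol=1e-2, rtol=1e-2))

The same pattern applies to the remaining rows; only the imported class names change.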
entry_point: MultiHeadAttention
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) q += residual q = self.layer_norm(q) return q, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last').to(tl.float32) tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax__to_copy_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .float32) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 + tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp9.to(tl.float32) tmp12 = tmp10 + tmp11 tmp13 = tmp8 + tmp12 tmp15 = tmp14.to(tl.float32) tmp17 = tmp15 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp5 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) 
assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 16), (16, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_4, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 16), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused__to_copy_0[grid(64)](primals_2, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_2 buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_5, buf4, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 16), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused__to_copy_0[grid(64)](primals_3, buf6, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_6, buf7, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_6 buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(buf7, (4, 16), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_1[grid(256)](buf2, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_clone_2[grid(64, 4)](buf5, buf10, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), out=buf11 ) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf11, buf12, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf14 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf11 triton_poi_fused__softmax__to_copy_4[grid(256)](buf12, buf13, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf12 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_5[grid(256)](buf8, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0) del buf8 extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16 ) buf17 = reinterpret_tensor(buf7, (16, 4), (1, 16), 0) del buf7 triton_poi_fused__to_copy_0[grid(64)](primals_7, buf17, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_7 buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_5[grid(256)](buf16, buf18, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf16 buf19 = 
empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf18, (16, 16), (16, 1), 0), buf17, out=buf19) buf20 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf21 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](buf19, primals_1, buf20, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(64)](buf19, primals_1, buf20, buf21, primals_8, primals_9, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf20 del buf21 del primals_9 return buf22, buf13, primals_1, primals_8, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf6, (16, 4), (4, 1), 0 ), buf13, reinterpret_tensor(buf18, (16, 16), (16, 1), 0 ), buf19, reinterpret_tensor(buf17, (4, 16), (16, 1), 0 ), reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttentionNew(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, input_0, input_1, input_2): primals_4 = self.w_qs.weight primals_5 = self.w_ks.weight primals_6 = self.w_vs.weight primals_7 = self.fc.weight primals_8 = self.layer_norm.weight primals_9 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
repo_name: bahducoup/factorized_training
module_name: MultiHeadAttention
synthetic: false
uuid: 12,157
licenses: [ "MIT" ]
stars: 0
sha: 0af38f16338a9bcfcc11091b1a6b75befd67f234
repo_link: https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
entry_point: ThreeLayerSemSegNet
import torch import torch.nn as nn class ThreeLayerSemSegNet(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 8, kernel_size=3, padding= 1, stride=1) self.conv2d1 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3 = torch.nn.Conv2d(8, out_channel, kernel_size=3, padding =1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) def forward(self, x): x = self.conv1(x) x = self.batchnorm1(x) x = self.ReLU1(x) x1 = self.conv2d1(x) x2 = self.conv2d5(x) x = torch.cat((x1, x2), dim=1) x = self.batchnorm2(x) x = self.ReLU2(x) x = self.conv3(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 8 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 128 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp12 = tmp9 + tmp11 tmp13 = triton_helpers.maximum(tmp8, tmp12) tmp17 = tmp14 + tmp16 tmp18 = triton_helpers.maximum(tmp13, tmp17) tmp19 = tmp3 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp7 - tmp18 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp12 - tmp18 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tmp27 = tmp17 - tmp18 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tl.store(out_ptr0 + x2, tmp18, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = tl_math.log(tmp5) tmp7 = tmp4 - tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8,), (1,)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (8,), (1,)) assert_size_stride(primals_11, (8,), (1,)) assert_size_stride(primals_12, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf3 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf5 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf1, buf2, buf3, buf5, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf1, buf2, buf3, primals_4, primals_5, buf6, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_3[grid(512)](buf7, primals_7, buf8, primals_9, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del buf8 del primals_7 del primals_9 buf10 = buf3 del buf3 buf11 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf13 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf9, buf10, buf11, buf13, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf9, buf10, buf11, primals_10, primals_11, buf14, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf11 del primals_11 buf15 = extern_kernels.convolution(buf14, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__log_softmax_convolution_4[grid(64)](buf15, primals_13, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = buf15 del buf15 triton_poi_fused__log_softmax_convolution_5[grid(256)](buf18, primals_13, buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf16 del buf17 del primals_13 return (buf18, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, buf1, reinterpret_tensor(buf5, (8,), (1,), 0), buf6, buf9, reinterpret_tensor(buf13, (8,), (1,), 0), buf14, buf18, reinterpret_tensor(buf10, (1, 8, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 8, 1, 1), (8, 1, 1, 1), 0)) class ThreeLayerSemSegNetNew(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 8, kernel_size=3, padding= 1, stride=1) self.conv2d1 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3 = torch.nn.Conv2d(8, out_channel, kernel_size=3, padding =1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() 
self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2d1.weight primals_7 = self.conv2d1.bias primals_8 = self.conv2d5.weight primals_9 = self.conv2d5.bias primals_12 = self.conv3.weight primals_13 = self.conv3.bias primals_4 = self.batchnorm1.weight primals_5 = self.batchnorm1.bias primals_10 = self.batchnorm2.weight primals_11 = self.batchnorm2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
repo_name: benkoger/kasanka
module_name: ThreeLayerSemSegNet
synthetic: false
uuid: 12,158
licenses: [ "Apache-2.0" ]
stars: 0
sha: d5b1d32b7abf54845af0832da577137397089001
repo_link: https://github.com/benkoger/kasanka/tree/d5b1d32b7abf54845af0832da577137397089001
entry_point: ThreeLayerSemSegNetWideViewHighDim
import torch import torch.nn as nn class ThreeLayerSemSegNetWideViewHighDim(nn.Module): """Each layer has more channels than the standard model""" def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 12, kernel_size=3, padding =1, stride=1) self.conv1d100 = torch.nn.Conv2d(in_channel, 4, kernel_size=3, padding=101, stride=1, dilation=101) self.conv2d1 = torch.nn.Conv2d(16, 8, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(16, 8, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3 = torch.nn.Conv2d(16, out_channel, kernel_size=3, padding=1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(16, track_running_stats= False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(16, track_running_stats= False, momentum=1.0) def forward(self, x): x1 = self.conv1(x) x2 = self.conv1d100(x) x = torch.cat((x1, x2), dim=1) x = self.batchnorm1(x) x = self.ReLU1(x) x1 = self.conv2d1(x) x2 = self.conv2d5(x) x = torch.cat((x1, x2), dim=1) x = self.batchnorm2(x) x = self.ReLU2(x) x = self.conv3(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 12, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 192 * x2), tmp4 & xmask, other=0.0 ) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 16, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-12 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 256 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 
tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 128 * x2), tmp4 & xmask, other=0.0 ) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 16, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 128 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-8 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp12 = tmp9 + tmp11 tmp13 = triton_helpers.maximum(tmp8, tmp12) tmp17 = tmp14 + tmp16 tmp18 = triton_helpers.maximum(tmp13, tmp17) tmp19 = tmp3 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp7 - tmp18 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp12 - tmp18 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tmp27 = tmp17 - tmp18 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tl.store(out_ptr0 + x2, tmp18, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = tl_math.log(tmp5) tmp7 = tmp4 - tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (12, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_11, (8,), (1,)) assert_size_stride(primals_12, (16,), (1,)) assert_size_stride(primals_13, (16,), (1,)) assert_size_stride(primals_14, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 12, 4, 4), (192, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(101, 101), dilation=(101, 101), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_cat_0[grid(1024)](buf0, primals_2, buf1, primals_5, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_1[grid(16)](buf2, buf3, buf4, buf6, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) triton_poi_fused__native_batch_norm_legit_relu_2[grid(1024)](buf2, buf3, buf4, primals_6, primals_7, buf7, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 8, 4, 4), (128, 16, 4, 1)) buf9 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 8, 4, 4), (128, 16, 4, 1)) buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. float32) triton_poi_fused_cat_3[grid(1024)](buf8, primals_9, buf9, primals_11, buf10, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del buf9 del primals_11 del primals_9 buf11 = buf4 del buf4 buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_1[grid(16)](buf10, buf11, buf12, buf14, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf15 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) triton_poi_fused__native_batch_norm_legit_relu_2[grid(1024)](buf10, buf11, buf12, primals_12, primals_13, buf15, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1)) buf17 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__log_softmax_convolution_4[grid(64)](buf16, primals_15, buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = buf16 del buf16 triton_poi_fused__log_softmax_convolution_5[grid(256)](buf19, primals_15, buf17, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf17 del buf18 del primals_15 return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf2, reinterpret_tensor(buf6, (16,), (1,), 0), buf7, buf10, reinterpret_tensor(buf14, (16,), (1,), 0), buf15, buf19, reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ThreeLayerSemSegNetWideViewHighDimNew(nn.Module): """Each layer has more channels than the standard model""" def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 12, kernel_size=3, padding =1, stride=1) self.conv1d100 = torch.nn.Conv2d(in_channel, 4, kernel_size=3, padding=101, stride=1, dilation=101) self.conv2d1 = torch.nn.Conv2d(16, 8, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(16, 8, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3 = torch.nn.Conv2d(16, out_channel, kernel_size=3, padding=1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(16, track_running_stats= False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(16, track_running_stats= False, momentum=1.0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv1d100.weight primals_5 = self.conv1d100.bias primals_8 = self.conv2d1.weight primals_9 = self.conv2d1.bias primals_10 = self.conv2d5.weight primals_11 = self.conv2d5.bias primals_14 = self.conv3.weight primals_15 = self.conv3.bias primals_6 = self.batchnorm1.weight primals_7 = self.batchnorm1.bias primals_12 = self.batchnorm2.weight primals_13 = self.batchnorm2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
repo_name: benkoger/kasanka
module_name: ThreeLayerSemSegNetWideViewHighDim
synthetic: false
uuid: 12,159
licenses: [ "Apache-2.0" ]
stars: 0
sha: d5b1d32b7abf54845af0832da577137397089001
repo_link: https://github.com/benkoger/kasanka/tree/d5b1d32b7abf54845af0832da577137397089001
entry_point: LowRankMultiHeadAttention
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.fc_u = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_v = nn.Linear(int(d_model / 4), d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, _len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs_v(self.w_qs_u(q)).view(sz_b, len_q, n_head, d_k) k = self.w_ks_v(self.w_ks_u(k)).view(sz_b, len_k, n_head, d_k) v = self.w_vs_v(self.w_vs_u(v)).view(sz_b, len_k, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_v(self.fc_u(q))) q += residual q = self.layer_norm(q) return q, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last').to(tl.float32) tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_t_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .float32) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 + tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp9.to(tl.float32) tmp12 = tmp10 + tmp11 tmp13 = tmp8 + tmp12 tmp15 = tmp14.to(tl.float32) tmp17 = tmp15 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp5 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (1, 16), (16, 1)) assert_size_stride(primals_11, (4, 1), (1, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_4, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_5, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_2, buf5, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_2 buf6 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_6, buf6, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_6 buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_7, buf8, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_7 buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf7, reinterpret_tensor(buf8, (4, 16), (1, 4), 0 ), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_3, buf10, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf11 = buf6 del buf6 triton_poi_fused__to_copy_1[grid(16)](primals_8, buf11, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_8 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_9, buf13, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_9 buf14 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf12, reinterpret_tensor(buf13, (4, 16), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf4, buf15, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_clone_3[grid(64, 4)](buf9, buf16, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf17 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0) del buf9 extern_kernels.bmm(reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), out=buf17 ) buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf17, buf18, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf17 triton_poi_fused__softmax__to_copy_5[grid(256)](buf18, buf19, buf20, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf18 buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf14, 
buf21, 256, XBLOCK=256, num_warps=4, num_stages=1) buf22 = reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0) del buf14 extern_kernels.bmm(reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), out=buf22 ) buf23 = reinterpret_tensor(buf11, (16, 1), (1, 16), 0) del buf11 triton_poi_fused__to_copy_1[grid(16)](primals_10, buf23, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_10 buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf22, buf24, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf22 buf25 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf24, (16, 16), (16, 1), 0), buf23, out=buf25) buf26 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_11, buf26, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_11 buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf25, buf26, out=buf27) buf28 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf29 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(16)](buf27, primals_1, buf28, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](buf27, primals_1, buf28, buf29, primals_12, primals_13, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf28 del buf29 del primals_13 return buf30, buf19, primals_1, primals_12, reinterpret_tensor(buf0, ( 16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), buf2, reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf8, (4, 16), (1, 4), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf13, (4, 16), (1, 4), 0 ), buf12, buf19, reinterpret_tensor(buf24, (16, 16), (16, 1), 0 ), buf25, buf27, reinterpret_tensor(buf26, (4, 1), (1, 1), 0 ), reinterpret_tensor(buf23, (1, 16), (16, 1), 0), reinterpret_tensor( buf20, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf16, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankMultiHeadAttentionNew(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.fc_u = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_v = nn.Linear(int(d_model / 4), d_model, bias=False) self.attention = 
ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, input_0, input_1, input_2): primals_4 = self.w_qs_u.weight primals_5 = self.w_qs_v.weight primals_6 = self.w_ks_u.weight primals_7 = self.w_ks_v.weight primals_8 = self.w_vs_u.weight primals_9 = self.w_vs_v.weight primals_10 = self.fc_u.weight primals_11 = self.fc_v.weight primals_12 = self.layer_norm.weight primals_13 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
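A minimal smoke test for the compiled wrapper above (not part of the original record; the class name and shapes are taken from this record, but the test itself is an assumption about how such pairs are exercised). call() allocates CUDA buffers, so a CUDA device is required.

import torch

# Hypothetical check: run the Triton-compiled wrapper on random q/k/v tensors
# shaped (batch, seq_len, d_model) = (4, 4, 4), matching the asserts in call().
model = LowRankMultiHeadAttentionNew(n_head=4, d_model=4, d_k=4, d_v=4).cuda()
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')
out, attn = model(q, k, v)
print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4, 4])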
bahducoup/factorized_training
LowRankMultiHeadAttention
false
12160
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
ThreeLayerSemSegNetWideView
import torch import torch.nn as nn class ThreeLayerSemSegNetWideView(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 6, kernel_size=3, padding= 1, stride=1) self.conv1d100 = torch.nn.Conv2d(in_channel, 2, kernel_size=3, padding=101, stride=1, dilation=101) self.conv2d1 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3 = torch.nn.Conv2d(8, out_channel, kernel_size=3, padding =1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) def forward(self, x): x1 = self.conv1(x) x2 = self.conv1d100(x) x = torch.cat((x1, x2), dim=1) x = self.batchnorm1(x) x = self.ReLU1(x) x1 = self.conv2d1(x) x2 = self.conv2d5(x) x = torch.cat((x1, x2), dim=1) x = self.batchnorm2(x) x = self.ReLU2(x) x = self.conv3(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 6, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-6 + x1) + 32 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-6 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 8 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 128 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = 
tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp12 = tmp9 + tmp11 tmp13 = triton_helpers.maximum(tmp8, tmp12) tmp17 = tmp14 + tmp16 tmp18 = triton_helpers.maximum(tmp13, tmp17) tmp19 = tmp3 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp7 - tmp18 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp12 - tmp18 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tmp27 = tmp17 - tmp18 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tl.store(out_ptr0 + x2, tmp18, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = tl_math.log(tmp5) tmp7 = tmp4 - tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (6, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_4, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (8,), (1,)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (8,), (1,)) assert_size_stride(primals_13, (8,), (1,)) assert_size_stride(primals_14, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 4, 4), (96, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(101, 101), dilation=(101, 101), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 2, 4, 4), (32, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, primals_2, buf1, primals_5, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 buf3 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf4 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf6 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf2, buf3, buf4, buf6, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf2, buf3, buf4, primals_6, primals_7, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_3[grid(512)](buf8, primals_9, buf9, primals_11, buf10, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del buf9 del primals_11 del primals_9 buf11 = buf4 del buf4 buf12 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf14 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf10, buf11, buf12, buf14, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf10, buf11, buf12, primals_12, primals_13, buf15, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1)) buf17 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__log_softmax_convolution_4[grid(64)](buf16, 
primals_15, buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = buf16 del buf16 triton_poi_fused__log_softmax_convolution_5[grid(256)](buf19, primals_15, buf17, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf17 del buf18 del primals_15 return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf2, reinterpret_tensor(buf6, (8,), (1,), 0), buf7, buf10, reinterpret_tensor(buf14, (8,), (1,), 0), buf15, buf19, reinterpret_tensor(buf11, (1, 8, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 8, 1, 1), (8, 1, 1, 1), 0)) class ThreeLayerSemSegNetWideViewNew(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 6, kernel_size=3, padding= 1, stride=1) self.conv1d100 = torch.nn.Conv2d(in_channel, 2, kernel_size=3, padding=101, stride=1, dilation=101) self.conv2d1 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3 = torch.nn.Conv2d(8, out_channel, kernel_size=3, padding =1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv1d100.weight primals_5 = self.conv1d100.bias primals_8 = self.conv2d1.weight primals_9 = self.conv2d1.bias primals_10 = self.conv2d5.weight primals_11 = self.conv2d5.bias primals_14 = self.conv3.weight primals_15 = self.conv3.bias primals_6 = self.batchnorm1.weight primals_7 = self.batchnorm1.bias primals_12 = self.batchnorm2.weight primals_13 = self.batchnorm2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
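A hedged equivalence sketch (not part of the record): with shared weights, the compiled ThreeLayerSemSegNetWideViewNew above should agree with the eager ThreeLayerSemSegNetWideView defined earlier up to floating-point noise. The comparison below is an assumption about how such records are validated, and it needs a CUDA device.

import torch

# Hypothetical check: copy the compiled wrapper's weights into the eager model
# and compare outputs on a random input of the record's sample shape.
compiled = ThreeLayerSemSegNetWideViewNew(in_channel=4, out_channel=4).cuda()
eager = ThreeLayerSemSegNetWideView(in_channel=4, out_channel=4).cuda()
eager.load_state_dict(compiled.state_dict())  # identical submodules, so keys match
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x)
out = compiled(x)
print((out - ref).abs().max())  # expected to be small (floating-point reordering only)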
benkoger/kasanka
ThreeLayerSemSegNetWideView
false
12161
[ "Apache-2.0" ]
0
d5b1d32b7abf54845af0832da577137397089001
https://github.com/benkoger/kasanka/tree/d5b1d32b7abf54845af0832da577137397089001
UNet
import torch class Block(torch.nn.Module): def __init__(self, in_channels, mid_channel, out_channels, batch_norm=False ): super().__init__() self.conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels= mid_channel, kernel_size=3, padding=1) self.conv2 = torch.nn.Conv2d(in_channels=mid_channel, out_channels= out_channels, kernel_size=3, padding=1) self.batch_norm = batch_norm if batch_norm: self.bn1 = torch.nn.BatchNorm2d(mid_channel) self.bn2 = torch.nn.BatchNorm2d(out_channels) def forward(self, x): x = self.conv1(x) if self.batch_norm: x = self.bn1(x) x = torch.nn.ReLU(inplace=True)(x) x = self.conv2(x) if self.batch_norm: x = self.bn2(x) out = torch.nn.ReLU(inplace=True)(x) return out class UNet(torch.nn.Module): def up(self, x, size): return torch.nn.functional.interpolate(x, size=size, mode=self. upscale_mode) def down(self, x): return torch.nn.MaxPool2d(kernel_size=2)(x) def __init__(self, in_channels, out_channels, batch_norm=False, upscale_mode='nearest'): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.batch_norm = batch_norm self.upscale_mode = upscale_mode self.enc1 = Block(in_channels, 64, 64, batch_norm) self.enc2 = Block(64, 128, 128, batch_norm) self.enc3 = Block(128, 256, 256, batch_norm) self.enc4 = Block(256, 512, 512, batch_norm) self.center = Block(512, 1024, 512, batch_norm) self.dec4 = Block(1024, 512, 256, batch_norm) self.dec3 = Block(512, 256, 128, batch_norm) self.dec2 = Block(256, 128, 64, batch_norm) self.dec1 = Block(128, 64, 64, batch_norm) self.out = torch.nn.Conv2d(in_channels=64, out_channels= out_channels, kernel_size=1) def forward(self, x): enc1 = self.enc1(x) enc2 = self.enc2(self.down(enc1)) enc3 = self.enc3(self.down(enc2)) enc4 = self.enc4(self.down(enc3)) center = self.center(self.down(enc4)) dec4 = self.dec4(torch.cat([self.up(center, enc4.size()[-2:]), enc4 ], 1)) dec3 = self.dec3(torch.cat([self.up(dec4, enc3.size()[-2:]), enc3], 1)) dec2 = self.dec2(torch.cat([self.up(dec3, enc2.size()[-2:]), enc2], 1)) dec1 = self.dec1(torch.cat([self.up(dec2, enc1.size()[-2:]), enc1], 1)) out = self.out(dec1) return out def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = 
tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = 
xindex // 16 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_9(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 64 % 1024 x1 = xindex // 8 % 8 x0 = xindex % 8 x3 = xindex // 65536 x4 = xindex % 64 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 4, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp11 = tmp10 + tmp6 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr1 + (tmp13 + 4 * tmp9 + 16 * x2 + 8192 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0 ) tmp16 = tmp14 + tmp15 tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tl.full([1], 1024, tl.int64) tmp24 = tl.load(in_ptr3 + (x4 + 64 * (-512 + x2) + 32768 * x3), tmp21, other=0.0) tmp25 = tl.where(tmp4, tmp20, tmp24) tl.store(out_ptr0 + x5, tmp25, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 256 % 512 x1 = xindex // 16 % 16 x0 = xindex % 16 x3 = xindex // 131072 x4 = xindex % 256 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 8, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp11 = tmp10 + tmp6 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr1 + (tmp13 + 8 * tmp9 + 64 * x2 + 16384 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0 ) tmp16 = tmp14 + tmp15 tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp24 = tl.load(in_ptr3 + (x4 
+ 256 * (-256 + x2) + 65536 * x3), tmp21, other=0.0) tmp25 = tl.where(tmp4, tmp20, tmp24) tl.store(out_ptr0 + x5, tmp25, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 1024 % 256 x1 = xindex // 32 % 32 x0 = xindex % 32 x3 = xindex // 262144 x4 = xindex % 1024 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 16, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp11 = tmp10 + tmp6 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr1 + (tmp13 + 16 * tmp9 + 256 * x2 + 32768 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0 ) tmp16 = tmp14 + tmp15 tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp24 = tl.load(in_ptr3 + (x4 + 1024 * (-128 + x2) + 131072 * x3), tmp21, other=0.0) tmp25 = tl.where(tmp4, tmp20, tmp24) tl.store(out_ptr0 + x5, tmp25, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 4096 % 128 x1 = xindex // 64 % 64 x0 = xindex % 64 x3 = xindex // 524288 x4 = xindex % 4096 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 32, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp11 = tmp10 + tmp6 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr1 + (tmp13 + 32 * tmp9 + 1024 * x2 + 65536 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0 ) tmp16 = tmp14 + tmp15 tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp24 = tl.load(in_ptr3 + (x4 + 4096 * (-64 + x2) + 262144 * x3), tmp21, other=0.0) tmp25 = tl.where(tmp4, tmp20, tmp24) tl.store(out_ptr0 + 
x5, tmp25, None) @triton.jit def triton_poi_fused_convolution_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) 
assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (1024,), (1,)) assert_size_stride(primals_20, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_33, (64,), (1,)) assert_size_stride(primals_34, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (64,), (1,)) assert_size_stride(primals_36, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_37, (64,), (1,)) assert_size_stride(primals_38, (4, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_39, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) 
del primals_9 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) buf17 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf15, buf16, buf17, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 512, 8, 8), (32768, 64, 8, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_6[grid(131072)](buf19, primals_15, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 8, 8), (32768, 64, 8, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_6[grid(131072)](buf21, primals_17, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_17 buf22 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. 
float32) buf23 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.int8 ) triton_poi_fused_max_pool2d_with_indices_7[grid(32768)](buf21, buf22, buf23, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_8[grid(65536)](buf25, primals_19, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_19 buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 4, 4), (8192, 16, 4, 1)) buf27 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_9[grid(8)](buf27, 8, XBLOCK=8, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((4, 1024, 8, 8), (65536, 64, 8, 1), torch.float32) triton_poi_fused_cat_10[grid(262144)](buf27, buf26, primals_21, buf21, buf28, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf29 = extern_kernels.convolution(buf28, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 512, 8, 8), (32768, 64, 8, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_relu_6[grid(131072)](buf30, primals_23, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_23 buf31 = extern_kernels.convolution(buf30, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 8, 8), (16384, 64, 8, 1)) buf32 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_11[grid(16)](buf32, 16, XBLOCK=16, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1), torch.float32) triton_poi_fused_cat_12[grid(524288)](buf32, buf31, primals_25, buf15, buf33, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf34 = extern_kernels.convolution(buf33, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 16, 16), (65536, 256, 16, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_4[grid(262144)](buf35, primals_27, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 128, 16, 16), (32768, 256, 16, 1)) buf37 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_13[grid(32)](buf37, 32, XBLOCK=32, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((4, 256, 32, 32), (262144, 1024, 32, 1), torch.float32) triton_poi_fused_cat_14[grid(1048576)](buf37, buf36, primals_29, buf9, buf38, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf39 = extern_kernels.convolution(buf38, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf40 = buf39 del buf39 triton_poi_fused_convolution_relu_2[grid(524288)](buf40, primals_31, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf41 = extern_kernels.convolution(buf40, primals_32, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf41, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf42 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_15[grid(64)](buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_16[grid(2097152)](buf42, buf41, primals_33, buf3, buf43, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf44 = extern_kernels.convolution(buf43, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_relu_0[grid(1048576)](buf45, primals_35, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_35 buf46 = extern_kernels.convolution(buf45, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf47 = buf46 del buf46 triton_poi_fused_convolution_relu_0[grid(1048576)](buf47, primals_37, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_37 buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_17[grid(65536)](buf49, primals_39, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_39 buf50 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_18[grid(262144)]( buf41, primals_33, buf50, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf41 del primals_33 buf51 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(131072)]( buf36, primals_29, buf51, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del buf36 del primals_29 buf52 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_20[grid(65536)]( buf31, primals_25, buf52, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf31 del primals_25 buf53 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_21[grid(32768)]( buf26, primals_21, buf53, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf26 del primals_21 return (buf49, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf42, buf43, buf45, buf47, buf50, buf51, buf52, buf53) class Block(torch.nn.Module): def __init__(self, in_channels, mid_channel, out_channels, batch_norm=False ): super().__init__() self.conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels= mid_channel, kernel_size=3, padding=1) self.conv2 = torch.nn.Conv2d(in_channels=mid_channel, out_channels= out_channels, kernel_size=3, padding=1) self.batch_norm = batch_norm if batch_norm: self.bn1 = 
torch.nn.BatchNorm2d(mid_channel) self.bn2 = torch.nn.BatchNorm2d(out_channels) def forward(self, x): x = self.conv1(x) if self.batch_norm: x = self.bn1(x) x = torch.nn.ReLU(inplace=True)(x) x = self.conv2(x) if self.batch_norm: x = self.bn2(x) out = torch.nn.ReLU(inplace=True)(x) return out class UNetNew(torch.nn.Module): def up(self, x, size): return torch.nn.functional.interpolate(x, size=size, mode=self. upscale_mode) def down(self, x): return torch.nn.MaxPool2d(kernel_size=2)(x) def __init__(self, in_channels, out_channels, batch_norm=False, upscale_mode='nearest'): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.batch_norm = batch_norm self.upscale_mode = upscale_mode self.enc1 = Block(in_channels, 64, 64, batch_norm) self.enc2 = Block(64, 128, 128, batch_norm) self.enc3 = Block(128, 256, 256, batch_norm) self.enc4 = Block(256, 512, 512, batch_norm) self.center = Block(512, 1024, 512, batch_norm) self.dec4 = Block(1024, 512, 256, batch_norm) self.dec3 = Block(512, 256, 128, batch_norm) self.dec2 = Block(256, 128, 64, batch_norm) self.dec1 = Block(128, 64, 64, batch_norm) self.out = torch.nn.Conv2d(in_channels=64, out_channels= out_channels, kernel_size=1) def forward(self, input_0): primals_1 = self.enc1.conv1.weight primals_2 = self.enc1.conv1.bias primals_4 = self.enc1.conv2.weight primals_5 = self.enc1.conv2.bias primals_6 = self.enc2.conv1.weight primals_7 = self.enc2.conv1.bias primals_8 = self.enc2.conv2.weight primals_9 = self.enc2.conv2.bias primals_10 = self.enc3.conv1.weight primals_11 = self.enc3.conv1.bias primals_12 = self.enc3.conv2.weight primals_13 = self.enc3.conv2.bias primals_14 = self.enc4.conv1.weight primals_15 = self.enc4.conv1.bias primals_16 = self.enc4.conv2.weight primals_17 = self.enc4.conv2.bias primals_18 = self.center.conv1.weight primals_19 = self.center.conv1.bias primals_20 = self.center.conv2.weight primals_21 = self.center.conv2.bias primals_22 = self.dec4.conv1.weight primals_23 = self.dec4.conv1.bias primals_24 = self.dec4.conv2.weight primals_25 = self.dec4.conv2.bias primals_26 = self.dec3.conv1.weight primals_27 = self.dec3.conv1.bias primals_28 = self.dec3.conv2.weight primals_29 = self.dec3.conv2.bias primals_30 = self.dec2.conv1.weight primals_31 = self.dec2.conv1.bias primals_32 = self.dec2.conv2.weight primals_33 = self.dec2.conv2.bias primals_34 = self.dec1.conv1.weight primals_35 = self.dec1.conv1.bias primals_36 = self.dec1.conv2.weight primals_37 = self.dec1.conv2.bias primals_38 = self.out.weight primals_39 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39]) return output[0]
amrane99/lung-segmentation
UNet
false
12162
[ "MIT" ]
0
ab29db75ac78918da5cbf66b830acaf36cf7b44a
https://github.com/amrane99/lung-segmentation/tree/ab29db75ac78918da5cbf66b830acaf36cf7b44a
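Illustrative usage sketch (not part of the dataset record above): a minimal smoke test for the UNetNew wrapper defined in the compiled code. The CUDA device, the batch size, and the 4-channel 64x64 input are assumptions here, chosen to match the (4, 4, 64, 64) buffer asserts inside call(); they are not stated elsewhere in the record.

import torch

# Hypothetical smoke test; in_channels/out_channels and the input shape are
# inferred from the buffer asserts in call() above, not from the source repo.
model = UNetNew(in_channels=4, out_channels=4).cuda()
x = torch.rand(4, 4, 64, 64, device='cuda')
with torch.no_grad():
    y = model(x)
print(y.shape)  # expected: torch.Size([4, 4, 64, 64])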
LowRankResidualMultiHeadAttention
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankResidualMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False) self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False) self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b, len_q, n_head, d_k) k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b, len_k, n_head, d_k) v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b, len_v, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q)) q += residual q = self.layer_norm(q) return q, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x2 + 16 * y3), xmask & ymask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def 
triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_t_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.float32) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp2.to(tl.float32) tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, 
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (16, 4), (4, 1)) assert_size_stride(primals_13, (1, 16), (16, 1)) assert_size_stride(primals_14, (4, 1), (1, 1)) assert_size_stride(primals_15, (4, 16), (16, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_4, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_5, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_6, buf5, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 16), (1, 4), 0), out=buf6) buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) del buf5 triton_poi_fused__to_copy_0[grid(64)](primals_2, buf7, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_2 buf8 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_7, buf8, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_7 buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(buf8, (4, 4), (1, 4), 0), out=buf9) buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_8, buf10, 64, XBLOCK= 64, 
num_warps=1, num_stages=1) del primals_8 buf11 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf9, reinterpret_tensor(buf10, (4, 16), (1, 4), 0), out=buf11) buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_9, buf12, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_9 buf13 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(buf12, (4, 16), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0) del buf12 triton_poi_fused__to_copy_0[grid(64)](primals_3, buf14, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf15 = buf8 del buf8 triton_poi_fused__to_copy_1[grid(16)](primals_10, buf15, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_10 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_11, buf17, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_11 buf18 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf16, reinterpret_tensor(buf17, (4, 16), (1, 4), 0), out=buf18) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_12, buf19, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_12 buf20 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(buf19, (4, 16), (1, 4), 0), out=buf20) buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf4, buf6, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf22 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 16)](buf11, buf13, buf22, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf23 = reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0) del buf13 extern_kernels.bmm(reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf22, (16, 4, 4), (16, 4, 1), 0), out=buf23 ) buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf23, buf24, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf27 = reinterpret_tensor(buf23, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf23 triton_poi_fused__softmax__to_copy_5[grid(256)](buf24, buf25, buf27, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf24 buf26 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf11 triton_poi_fused_clone_6[grid(256)](buf18, buf20, buf26, 256, XBLOCK=256, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0) del buf20 extern_kernels.bmm(reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf28 ) buf29 = reinterpret_tensor(buf15, (16, 1), (1, 16), 0) del buf15 triton_poi_fused__to_copy_1[grid(16)](primals_13, buf29, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_13 buf30 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf18 triton_poi_fused_clone_7[grid(256)](buf28, buf30, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf28 buf31 = empty_strided_cuda((16, 1), (1, 1), torch.float16) 
extern_kernels.mm(reinterpret_tensor(buf30, (16, 16), (16, 1), 0), buf29, out=buf31) buf32 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_14, buf32, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_14 buf33 = buf19 del buf19 extern_kernels.mm(buf31, buf32, out=buf33) buf34 = empty_strided_cuda((16, 4), (1, 16), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_15, buf34, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_15 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf30, (16, 16), (16, 1), 0), buf34, out=buf35) buf36 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_9[grid(64)](buf33, buf35, primals_1, buf36, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf33 del buf35 del primals_1 buf37 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf38 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_10[grid(16)](buf36, buf37, buf38, 16, XBLOCK=16, num_warps=1, num_stages=1) buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_11[grid(64)](buf36, buf37, buf38, primals_16, primals_17, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf37 del buf38 del primals_17 return buf39, buf25, primals_16, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), buf2, reinterpret_tensor(buf7, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf10, (4, 16), (1, 4), 0 ), buf9, reinterpret_tensor(buf14, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf17, (4, 16), (1, 4), 0 ), buf16, buf25, reinterpret_tensor(buf30, (16, 16), (16, 1), 0 ), buf31, buf36, reinterpret_tensor(buf34, (4, 16), (16, 1), 0 ), reinterpret_tensor(buf32, (4, 1), (1, 1), 0), reinterpret_tensor( buf29, (1, 16), (16, 1), 0), reinterpret_tensor(buf27, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf21, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf22, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankResidualMultiHeadAttentionNew(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False) self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False) self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) 
self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, input_0, input_1, input_2): primals_5 = self.w_qs_u.weight primals_4 = self.w_qs_v.weight primals_6 = self.w_qs_res.weight primals_8 = self.w_ks_u.weight primals_7 = self.w_ks_v.weight primals_9 = self.w_ks_res.weight primals_11 = self.w_vs_u.weight primals_10 = self.w_vs_v.weight primals_12 = self.w_vs_res.weight primals_14 = self.fc_u.weight primals_13 = self.fc_v.weight primals_15 = self.fc_res.weight primals_16 = self.layer_norm.weight primals_17 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
bahducoup/factorized_training
LowRankResidualMultiHeadAttention
false
12163
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
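Illustrative usage sketch (not part of the dataset record above): exercising the reference LowRankResidualMultiHeadAttention with the sizes returned by get_init_inputs() and get_inputs() in the record. A CUDA device is assumed because forward is wrapped in torch.cuda.amp.autocast.

import torch

# Sizes taken from get_init_inputs()/get_inputs() above; CUDA is an assumption.
mha = LowRankResidualMultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4).cuda()
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')
out, attn = mha(q, k, v)
print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4, 4])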
Encoder
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Encoder(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(Encoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(x.size(0), -1) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch .float32) triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_5[grid(123008)](buf6, primals_2, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(50176)](buf8, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(18432)](buf10, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256)) buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. 
float32) buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(1024, 4)]( buf11, primals_9, buf12, buf15, 1024, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del buf11 del primals_9 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf13) del primals_11 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf14) del primals_13 return (buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), primals_12, primals_10, buf15) class EncoderNew(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(EncoderNew, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc_mu.weight primals_11 = self.fc_mu.bias primals_12 = self.fc_logsigma.weight primals_13 = self.fc_logsigma.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
benedictquartey/softgym_wm
Encoder
false
12164
[ "BSD-3-Clause" ]
0
0aef75fed207b11029f6052c656a679c105b4677
https://github.com/benedictquartey/softgym_wm/tree/0aef75fed207b11029f6052c656a679c105b4677
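Illustrative usage sketch (not part of the dataset record above): the reference Encoder run on the input shape from get_inputs(). This one uses no autocast, so it runs on CPU as written; the compiled EncoderNew variant would additionally require CUDA.

import torch

# Shapes taken from get_inputs()/get_init_inputs() above.
enc = Encoder(img_channels=4, latent_size=4)
x = torch.rand(4, 4, 64, 64)
mu, logsigma = enc(x)
print(mu.shape, logsigma.shape)  # torch.Size([4, 4]) torch.Size([4, 4])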
FourLayerSemSegNetWideView
import torch import torch.nn as nn class FourLayerSemSegNetWideView(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 6, kernel_size=3, padding= 1, stride=1) self.conv1d100 = torch.nn.Conv2d(in_channel, 2, kernel_size=3, padding=101, stride=1, dilation=101) self.conv2d1 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3d0 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=1, stride=1 ) self.conv3d3 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=4, stride=1, dilation=4) self.conv4 = torch.nn.Conv2d(8, out_channel, kernel_size=3, padding =1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.ReLU3 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm3 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) def forward(self, x): x1a = self.conv1(x) x1b = self.conv1d100(x) x1 = torch.cat((x1a, x1b), dim=1) x1 = self.batchnorm1(x1) x1 = self.ReLU1(x1) x2a = self.conv2d1(x1) x2b = self.conv2d5(x1) x2 = torch.cat((x2a, x2b), dim=1) x2 = self.batchnorm2(x2) x2 = self.ReLU2(x2) x3a = self.conv3d0(x2) x3b = self.conv3d3(x2) x3 = torch.cat((x3a, x3b), dim=1) x3 = self.batchnorm3(x3) x3 = self.ReLU3(x3) x4 = self.conv4(x3) xout = self.softmax(x4) return xout def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 6, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-6 + x1) + 32 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-6 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 8 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 128 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = 
tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp12 = tmp9 + tmp11 tmp13 = triton_helpers.maximum(tmp8, tmp12) tmp17 = tmp14 + tmp16 tmp18 = triton_helpers.maximum(tmp13, tmp17) tmp19 = tmp3 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp7 - tmp18 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp12 - tmp18 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tmp27 = tmp17 - tmp18 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tl.store(out_ptr0 + x2, tmp18, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) @triton.jit def triton_poi_fused__log_softmax_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = tl_math.log(tmp5) tmp7 = tmp4 - tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (6, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, 
(6,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (8,), (1,)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (8,), (1,)) assert_size_stride(primals_13, (8,), (1,)) assert_size_stride(primals_14, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (8,), (1,)) assert_size_stride(primals_19, (8,), (1,)) assert_size_stride(primals_20, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_21, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 4, 4), (96, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(101, 101), dilation=(101, 101), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 2, 4, 4), (32, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, primals_2, buf1, primals_5, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 buf3 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf4 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf6 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf2, buf3, buf4, buf6, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf2, buf3, buf4, primals_6, primals_7, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_3[grid(512)](buf8, primals_9, buf9, primals_11, buf10, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del buf9 del primals_11 del primals_9 buf11 = buf4 del buf4 buf12 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf14 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf10, buf11, buf12, buf14, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf10, buf11, buf12, primals_12, primals_13, buf15, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 
1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1)) buf17 = extern_kernels.convolution(buf15, primals_16, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 4, 4), (64, 16, 4, 1)) buf18 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_3[grid(512)](buf16, primals_15, buf17, primals_17, buf18, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf16 del buf17 del primals_15 del primals_17 buf19 = buf12 del buf12 buf20 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf22 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(8)](buf18, buf19, buf20, buf22, 8, 64, XBLOCK=8, num_warps=4, num_stages=1) buf23 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__native_batch_norm_legit_relu_2[grid(512)](buf18, buf19, buf20, primals_18, primals_19, buf23, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf20 del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 4, 4), (64, 16, 4, 1)) buf25 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__log_softmax_convolution_4[grid(64)](buf24, primals_21, buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = buf24 del buf24 triton_poi_fused__log_softmax_convolution_5[grid(256)](buf27, primals_21, buf25, buf26, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf25 del buf26 del primals_21 return (buf27, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, buf2, reinterpret_tensor(buf6, (8,), (1,), 0), buf7, buf10, reinterpret_tensor(buf14, (8,), (1,), 0), buf15, buf18, reinterpret_tensor(buf22, (8,), (1,), 0), buf23, buf27, reinterpret_tensor(buf19, (1, 8, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf11, (1, 8, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 8, 1, 1), (8, 1, 1, 1), 0)) class FourLayerSemSegNetWideViewNew(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv1 = torch.nn.Conv2d(in_channel, 6, kernel_size=3, padding= 1, stride=1) self.conv1d100 = torch.nn.Conv2d(in_channel, 2, kernel_size=3, padding=101, stride=1, dilation=101) self.conv2d1 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=2, stride=1, dilation=2) self.conv2d5 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=6, stride=1, dilation=6) self.conv3d0 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=1, stride=1 ) self.conv3d3 = torch.nn.Conv2d(8, 4, kernel_size=3, padding=4, stride=1, dilation=4) self.conv4 = torch.nn.Conv2d(8, out_channel, kernel_size=3, padding =1, stride=1) self.ReLU1 = torch.nn.ReLU() self.ReLU2 = torch.nn.ReLU() self.ReLU3 = torch.nn.ReLU() self.softmax = torch.nn.LogSoftmax(dim=1) self.batchnorm1 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm2 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) self.batchnorm3 = torch.nn.BatchNorm2d(8, track_running_stats=False, momentum=1.0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv1d100.weight primals_5 = self.conv1d100.bias 
primals_8 = self.conv2d1.weight primals_9 = self.conv2d1.bias primals_10 = self.conv2d5.weight primals_11 = self.conv2d5.bias primals_14 = self.conv3d0.weight primals_15 = self.conv3d0.bias primals_16 = self.conv3d3.weight primals_17 = self.conv3d3.bias primals_20 = self.conv4.weight primals_21 = self.conv4.bias primals_6 = self.batchnorm1.weight primals_7 = self.batchnorm1.bias primals_12 = self.batchnorm2.weight primals_13 = self.batchnorm2.bias primals_18 = self.batchnorm3.weight primals_19 = self.batchnorm3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
benkoger/kasanka
FourLayerSemSegNetWideView
false
12165
[ "Apache-2.0" ]
0
d5b1d32b7abf54845af0832da577137397089001
https://github.com/benkoger/kasanka/tree/d5b1d32b7abf54845af0832da577137397089001
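Illustrative usage sketch (not part of the dataset record above): the reference FourLayerSemSegNetWideView on the (4, 4, 4, 4) input from get_inputs(). The output carries per-pixel log-probabilities over out_channel classes because the last layer is LogSoftmax over dim=1.

import torch

# Shapes taken from get_inputs()/get_init_inputs() above; runs on CPU as written.
net = FourLayerSemSegNetWideView(in_channel=4, out_channel=4)
x = torch.rand(4, 4, 4, 4)
logp = net(x)                # log-softmax over the channel dimension
print(logp.shape)            # torch.Size([4, 4, 4, 4])
print(logp.exp().sum(dim=1)) # each spatial position sums to ~1 after exp()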
ScaledDotProductAttention
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = tmp2.to(tl.float32) tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x2 + 4 * y3), tmp1, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax__to_copy_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__to_copy_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(64, 4)](arg1_1, buf1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float16) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax__to_copy_3[grid(256)](buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf6 = buf1 del buf1 triton_poi_fused__to_copy_4[grid(256)](arg2_1, buf6, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg2_1 buf7 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7) del buf5 del buf6 return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4 class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
bahducoup/factorized_training
ScaledDotProductAttention
false
12166
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
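A minimal sketch (not part of the record above) of how the eager ScaledDotProductAttention and its Triton-backed ScaledDotProductAttentionNew counterpart could be compared, assuming the record's classes and its get_inputs helper are in scope and a CUDA device is available; the fp16 tolerances are a guess.

import torch

q, k, v = [t.cuda() for t in get_inputs()]                        # three [4, 4, 4, 4] tensors
eager = ScaledDotProductAttention(temperature=4).cuda().eval()    # eval() disables dropout
fused = ScaledDotProductAttentionNew(temperature=4).cuda().eval()

with torch.no_grad():
    out_ref, attn_ref = eager(q, k, v)   # runs under @autocast(), so fp16 matmuls
    out_tri, attn_tri = fused(q, k, v)   # runs the generated call() path

# loose tolerances because both paths do their matmuls in float16
torch.testing.assert_close(out_tri.float(), out_ref.float(), rtol=1e-2, atol=1e-2)
torch.testing.assert_close(attn_tri.float(), attn_ref.float(), rtol=1e-2, atol=1e-2)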
LowRankResidualEncoderLayer
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankResidualMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False) self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False) self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b, len_q, n_head, d_k) k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b, len_k, n_head, d_k) v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b, len_v, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q)) q += residual q = self.layer_norm(q) return q, attn class LowRankResidualPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(int(d_in / 4), d_hid, bias=False) self.w_1_v = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_res = nn.Linear(d_in, d_hid) self.w_2_u = nn.Linear(int(d_in / 4), d_in, bias=False) self.w_2_v = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_res = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = F.relu(self.w_1_u(self.w_1_v(x)) + self.w_1_res(x)) x = self.w_2_u(self.w_2_v(x)) + self.w_2_res(x) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankResidualEncoderLayer(nn.Module): """ Compose with two layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankResidualEncoderLayer, self).__init__() self.slf_attn = 
LowRankResidualMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = LowRankResidualPositionwiseFeedForward(d_model, d_inner, dropout=dropout) @autocast() def forward(self, enc_input, slf_attn_mask=None): enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask) enc_output = self.pos_ffn(enc_output) return enc_output, enc_slf_attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x2 + 16 * y3), xmask & ymask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def 
triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_t_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.float32) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp2.to(tl.float32) tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused__to_copy_native_layer_norm_11(in_ptr0, 
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_12(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2.to(tl.float32) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = 0.0 tmp9 = tmp7 <= tmp8 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tmp2.to(tl.float32) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tmp5.to(tl.float32) tmp8 = tmp6 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_native_layer_norm_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, 4), (4, 1)) 
assert_size_stride(primals_11, (1, 16), (16, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) assert_size_stride(primals_13, (4, 16), (16, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (1, 4), (4, 1)) assert_size_stride(primals_17, (4, 1), (1, 1)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (1, 4), (4, 1)) assert_size_stride(primals_21, (4, 1), (1, 1)) assert_size_stride(primals_22, (4, 4), (4, 1)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4,), (1,)) assert_size_stride(primals_25, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_2, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_3, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_4, buf5, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_4 buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 16), (1, 4), 0), out=buf6) buf7 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_5, buf7, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_5 buf8 = buf5 del buf5 extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_6, buf9, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_6 buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf8, reinterpret_tensor(buf9, (4, 16), (1, 4), 0 ), out=buf10) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_7, buf11, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_7 buf12 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 16), (1, 4), 0), out=buf12) buf13 = buf7 del buf7 triton_poi_fused__to_copy_1[grid(16)](primals_8, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_8 buf14 = buf11 del buf11 extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_9, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_9 buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf14, reinterpret_tensor(buf15, (4, 16), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float16) 
triton_poi_fused__to_copy_0[grid(64)](primals_10, buf17, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_10 buf18 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf17, (4, 16), (1, 4), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf4, buf6, buf19, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf20 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 16)](buf10, buf12, buf20, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf21 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0) del buf12 extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0), out=buf21 ) buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf21, buf22, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf21 triton_poi_fused__softmax__to_copy_5[grid(256)](buf22, buf23, buf25, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf22 buf24 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 triton_poi_fused_clone_6[grid(256)](buf16, buf18, buf24, 256, XBLOCK=256, num_warps=4, num_stages=1) buf26 = reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0) del buf18 extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf24, (16, 4, 4), (16, 4, 1), 0), out=buf26 ) buf27 = reinterpret_tensor(buf13, (16, 1), (1, 16), 0) del buf13 triton_poi_fused__to_copy_1[grid(16)](primals_11, buf27, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_11 buf28 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf16 triton_poi_fused_clone_7[grid(256)](buf26, buf28, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf26 buf29 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf27, out=buf29) buf30 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_12, buf30, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_12 buf31 = buf17 del buf17 extern_kernels.mm(buf29, buf30, out=buf31) buf32 = empty_strided_cuda((16, 4), (1, 16), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_13, buf32, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_13 buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf32, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_9[grid(64)](buf31, buf33, primals_1, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_10[grid(16)](buf34, buf35, buf36, 16, XBLOCK=16, num_warps=1, num_stages=1) buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf39 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0) del buf33 triton_poi_fused__to_copy_native_layer_norm_11[grid(64)](buf34, buf35, buf36, primals_14, primals_15, buf37, buf39, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_15 buf38 = empty_strided_cuda((4, 1), (1, 
4), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_16, buf38, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_16 buf40 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), buf38, out=buf40) buf41 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_17, buf41, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_17 buf42 = buf31 del buf31 extern_kernels.mm(buf40, buf41, out=buf42) buf43 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_18, buf43, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_18 buf44 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), buf43, out=buf44) buf45 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_20, buf45, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_20 buf46 = reinterpret_tensor(buf42, (4, 4, 4), (16, 4, 1), 0) del buf42 buf56 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_12[grid(64)](buf46, buf44, primals_19, buf56, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_19 buf47 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf46, (16, 4), (4, 1), 0), buf45, out=buf47) buf48 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_21, buf48, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_21 buf49 = buf44 del buf44 extern_kernels.mm(buf47, buf48, out=buf49) buf50 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_22, buf50, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_22 buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf46, (16, 4), (4, 1), 0), buf50, out=buf51) buf52 = buf37 del buf37 triton_poi_fused_add_13[grid(64)](buf52, buf49, buf51, primals_23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf49 del buf51 del primals_23 buf53 = buf36 del buf36 buf54 = buf35 del buf35 triton_poi_fused_native_layer_norm_10[grid(16)](buf52, buf53, buf54, 16, XBLOCK=16, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_14[grid(64)](buf52, buf53, buf54, primals_24, primals_25, buf55, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf53 del buf54 del primals_25 return buf55, buf23, primals_14, primals_24, reinterpret_tensor(buf0, ( 16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), buf2, reinterpret_tensor(buf9, (4, 16), (1, 4), 0 ), buf8, reinterpret_tensor(buf15, (4, 16), (1, 4), 0 ), buf14, buf23, reinterpret_tensor(buf28, (16, 16), (16, 1), 0 ), buf29, buf34, reinterpret_tensor(buf39, (16, 4), (4, 1), 0 ), buf40, reinterpret_tensor(buf46, (16, 4), (4, 1), 0 ), buf47, buf52, reinterpret_tensor(buf50, (4, 4), (4, 1), 0 ), reinterpret_tensor(buf48, (4, 1), (1, 1), 0), reinterpret_tensor( buf45, (1, 4), (4, 1), 0), buf56, reinterpret_tensor(buf43, (4, 4), (4, 1), 0), reinterpret_tensor(buf41, (4, 1), (1, 1), 0 ), reinterpret_tensor(buf38, (1, 4), (4, 1), 0), reinterpret_tensor( buf32, (4, 16), (16, 1), 0), reinterpret_tensor(buf30, (4, 1), (1, 1), 0), reinterpret_tensor(buf27, (1, 16), (16, 1), 0 ), reinterpret_tensor(buf25, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf24, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf19, 
(16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf20, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankResidualMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False) self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False) self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b, len_q, n_head, d_k) k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b, len_k, n_head, d_k) v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b, len_v, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q)) q += residual q = self.layer_norm(q) return q, attn class LowRankResidualPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(int(d_in / 4), d_hid, bias=False) self.w_1_v = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_res = nn.Linear(d_in, d_hid) self.w_2_u = nn.Linear(int(d_in / 4), d_in, bias=False) self.w_2_v = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_res = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = F.relu(self.w_1_u(self.w_1_v(x)) + self.w_1_res(x)) x = self.w_2_u(self.w_2_v(x)) + self.w_2_res(x) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankResidualEncoderLayerNew(nn.Module): """ Compose with two layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankResidualEncoderLayerNew, self).__init__() self.slf_attn = LowRankResidualMultiHeadAttention(n_head, d_model, d_k, d_v, 
dropout=dropout) self.pos_ffn = LowRankResidualPositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, input_0): primals_3 = self.slf_attn.w_qs_u.weight primals_2 = self.slf_attn.w_qs_v.weight primals_4 = self.slf_attn.w_qs_res.weight primals_6 = self.slf_attn.w_ks_u.weight primals_5 = self.slf_attn.w_ks_v.weight primals_7 = self.slf_attn.w_ks_res.weight primals_9 = self.slf_attn.w_vs_u.weight primals_8 = self.slf_attn.w_vs_v.weight primals_10 = self.slf_attn.w_vs_res.weight primals_12 = self.slf_attn.fc_u.weight primals_11 = self.slf_attn.fc_v.weight primals_13 = self.slf_attn.fc_res.weight primals_14 = self.slf_attn.layer_norm.weight primals_15 = self.slf_attn.layer_norm.bias primals_17 = self.pos_ffn.w_1_u.weight primals_16 = self.pos_ffn.w_1_v.weight primals_18 = self.pos_ffn.w_1_res.weight primals_19 = self.pos_ffn.w_1_res.bias primals_21 = self.pos_ffn.w_2_u.weight primals_20 = self.pos_ffn.w_2_v.weight primals_22 = self.pos_ffn.w_2_res.weight primals_23 = self.pos_ffn.w_2_res.bias primals_24 = self.pos_ffn.layer_norm.weight primals_25 = self.pos_ffn.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0], output[1]
bahducoup/factorized_training
LowRankResidualEncoderLayer
false
12167
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
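As a side note, the w_*_u / w_*_v / w_*_res triplets in the record above implement a low-rank-plus-residual factorization of a single linear map. The following self-contained sketch (hypothetical dimensions, not from the repo) checks that u(v(x)) + res(x) matches one bias-free Linear whose weight is U @ V + W_res.

import torch
import torch.nn as nn

d_in, rank, d_out = 8, 2, 8
v = nn.Linear(d_in, rank, bias=False)      # the "_v" projection down to the low rank
u = nn.Linear(rank, d_out, bias=False)     # the "_u" projection back up
res = nn.Linear(d_in, d_out, bias=False)   # the full-rank residual path

combined = nn.Linear(d_in, d_out, bias=False)
with torch.no_grad():
    combined.weight.copy_(u.weight @ v.weight + res.weight)   # shape (d_out, d_in)

x = torch.randn(3, d_in)
assert torch.allclose(u(v(x)) + res(x), combined(x), atol=1e-5)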
GlobalAvgPool
import torch import torch as th from torch import nn class GlobalAvgPool(nn.Module): def __init__(self): super(GlobalAvgPool, self).__init__() def forward(self, x): return th.mean(x, dim=[-2, -1]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return buf1, class GlobalAvgPoolNew(nn.Module): def __init__(self): super(GlobalAvgPoolNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bjuncek/video_feature_extractor
GlobalAvgPool
false
12168
[ "Apache-2.0" ]
0
cac06b450d1164beb3f3710d5018c19091bce348
https://github.com/bjuncek/video_feature_extractor/tree/cac06b450d1164beb3f3710d5018c19091bce348
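For reference, a CPU-only sketch (no Triton involved) of what the reduction kernel above computes: averaging over the last two dimensions of an [N, C, H, W] tensor, which also matches adaptive average pooling to 1x1.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
ref = x.mean(dim=[-2, -1])                       # (4, 4): one value per (batch, channel)
alt = F.adaptive_avg_pool2d(x, 1).flatten(1)     # same reduction expressed as pooling
assert torch.allclose(ref, alt, atol=1e-6)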
EncoderLayer
import math import torch import torch.nn as nn import torch.nn.functional as F class AffineLayer(nn.Module): def __init__(self, dropout, d_model, d_ff): super(AffineLayer, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class MultiHeadedAttention(nn.Module): def __init__(self, num_head, d_model, dropout=0.1): super(MultiHeadedAttention, self).__init__() assert d_model % num_head == 0 self.d_k = d_model // num_head self.h = num_head self.linear_key = nn.Linear(d_model, d_model) self.linear_value = nn.Linear(d_model, d_model) self.linear_query = nn.Linear(d_model, d_model) self.linear_out = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def attention(self, query, key, value, mask, dropout=None): d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def forward(self, query, key, value, mask): nbatches = query.size(0) query = self.linear_query(query).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) key = self.linear_key(key).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) value = self.linear_value(value).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) mask = mask.unsqueeze(1) x, _attn = self.attention(query, key, value, mask, dropout=self.dropout ) x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k ) return self.linear_out(x) class EncoderLayer(nn.Module): def __init__(self, num_head, dropout, d_model, d_ff): super(EncoderLayer, self).__init__() self.att_layer = MultiHeadedAttention(num_head, d_model, dropout) self.norm_att = nn.LayerNorm(d_model) self.dropout_att = nn.Dropout(dropout) self.affine_layer = AffineLayer(dropout, d_model, d_ff) self.norm_affine = nn.LayerNorm(d_model) self.dropout_affine = nn.Dropout(dropout) def forward(self, x, mask): x_att = self.norm_att(x * mask) x_att = self.att_layer(x_att, x_att, x_att, mask) x = x + self.dropout_att(x_att) x_affine = self.norm_affine(x * mask) x_affine = self.affine_layer(x_affine) return x + self.dropout_affine(x_affine) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_head': 4, 'dropout': 0.5, 'd_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_mul_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def 
triton_poi_fused__softmax_div_eq_masked_fill_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp2, tmp6, tmp5) tmp9 = tmp8 == tmp1 tmp11 = tmp10 * tmp4 tmp12 = tl.where(tmp9, tmp6, tmp11) tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp15 = tmp14 == tmp1 tmp17 = tmp16 * tmp4 tmp18 = tl.where(tmp15, tmp6, tmp17) tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp21 = tmp20 == tmp1 tmp23 = tmp22 * tmp4 tmp24 = tl.where(tmp21, tmp6, tmp23) tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = tl_math.exp(tmp26) tmp28 = tmp12 - tmp25 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp18 - tmp25 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp24 - tmp25 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x3, tmp25, xmask) tl.store(out_ptr1 + x3, tmp36, xmask) @triton.jit def triton_poi_fused__softmax_div_eq_masked_fill_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_out_ptr0 + x5, xmask) tmp8 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp2, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + x5, tmp12, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 + tmp18 tmp21 = tmp19 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 4.0 tmp24 = tmp22 / tmp23 tmp25 = tmp4 - tmp24 tmp26 = tmp25 * tmp25 tmp27 = tmp9 - tmp24 tmp28 = tmp27 * tmp27 tmp29 = tmp26 + tmp28 tmp30 = tmp15 - tmp24 tmp31 = tmp30 * tmp30 tmp32 = tmp29 + tmp31 tmp33 = tmp21 - tmp24 tmp34 = tmp33 * tmp33 tmp35 = tmp32 + tmp34 tmp36 = tmp35 / tmp23 tl.store(out_ptr0 + x0, tmp24, xmask) tl.store(out_ptr1 + x0, tmp36, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x2, xmask) tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = 
tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_mul_native_layer_norm_0[grid(16)](primals_1, primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_native_layer_norm_1[grid(64)](primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_6, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf3 triton_poi_fused_clone_2[grid(16, 4)](buf4, primals_8, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = reinterpret_tensor(buf4, (4, 4, 4, 1), 
(16, 4, 1, 64), 0) del buf4 buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_eq_masked_fill_3[grid(64)](primals_2, buf8, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_div_eq_masked_fill_4[grid(256)](buf11, primals_2, buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf9 triton_poi_fused_clone_2[grid(16, 4)](buf5, primals_10, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf13 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13) buf14 = reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf10 triton_poi_fused_clone_5[grid(16, 4)](buf13, buf14, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0) del buf13 extern_kernels.addmm(primals_12, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_12 buf16 = buf1 del buf1 buf17 = buf0 del buf0 triton_poi_fused_add_mul_native_layer_norm_6[grid(16)](primals_1, buf15, primals_2, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_7[grid(64)](primals_1, buf15, primals_2, buf16, buf17, primals_13, primals_14, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf16 del buf17 del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_8[grid(64)](buf20, primals_16, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf21) buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0) del buf21 triton_poi_fused_add_9[grid(64)](buf22, primals_1, buf15, primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 return buf22, primals_1, primals_2, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf20, (16, 4), (4, 1), 0 ), primals_17, buf23, primals_15, primals_11, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0 ), primals_9, primals_7, primals_5 class AffineLayer(nn.Module): def __init__(self, dropout, d_model, d_ff): super(AffineLayer, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class MultiHeadedAttention(nn.Module): def __init__(self, num_head, d_model, dropout=0.1): super(MultiHeadedAttention, self).__init__() assert d_model % num_head == 0 self.d_k = d_model // num_head self.h = num_head 
self.linear_key = nn.Linear(d_model, d_model) self.linear_value = nn.Linear(d_model, d_model) self.linear_query = nn.Linear(d_model, d_model) self.linear_out = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def attention(self, query, key, value, mask, dropout=None): d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def forward(self, query, key, value, mask): nbatches = query.size(0) query = self.linear_query(query).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) key = self.linear_key(key).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) value = self.linear_value(value).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) mask = mask.unsqueeze(1) x, _attn = self.attention(query, key, value, mask, dropout=self.dropout ) x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k ) return self.linear_out(x) class EncoderLayerNew(nn.Module): def __init__(self, num_head, dropout, d_model, d_ff): super(EncoderLayerNew, self).__init__() self.att_layer = MultiHeadedAttention(num_head, d_model, dropout) self.norm_att = nn.LayerNorm(d_model) self.dropout_att = nn.Dropout(dropout) self.affine_layer = AffineLayer(dropout, d_model, d_ff) self.norm_affine = nn.LayerNorm(d_model) self.dropout_affine = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_5 = self.att_layer.linear_key.weight primals_3 = self.att_layer.linear_key.bias primals_7 = self.att_layer.linear_value.weight primals_4 = self.att_layer.linear_value.bias primals_9 = self.att_layer.linear_query.weight primals_6 = self.att_layer.linear_query.bias primals_11 = self.att_layer.linear_out.weight primals_8 = self.att_layer.linear_out.bias primals_10 = self.norm_att.weight primals_12 = self.norm_att.bias primals_15 = self.affine_layer.w_1.weight primals_13 = self.affine_layer.w_1.bias primals_17 = self.affine_layer.w_2.weight primals_14 = self.affine_layer.w_2.bias primals_16 = self.norm_affine.weight primals_18 = self.norm_affine.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
bekirufuk/pointer_summarizer
EncoderLayer
false
12169
[ "Apache-2.0" ]
0
8fc9726f9337b26339848d896a09e7e8f9456bcc
https://github.com/bekirufuk/pointer_summarizer/tree/8fc9726f9337b26339848d896a09e7e8f9456bcc
Decoder
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.unsqueeze(-1).unsqueeze(-1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = F.sigmoid(self.deconv4(x)) return reconstruction def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) 
assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(131072, 25)](primals_4, buf0, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch .float32) triton_poi_fused_2[grid(2048, 36)](primals_8, buf2, 2048, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32 ) triton_poi_fused_3[grid(128, 36)](primals_10, buf3, 128, 36, XBLOCK =32, YBLOCK=32, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 1024 ), (1, 4), 0), out=buf4) del primals_1 buf5 = buf4 del buf4 buf14 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf5, primals_2, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups =1, bias=None) assert_size_stride(buf6, (4, 128, 5, 5), (3200, 1, 640, 128)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(12800)](buf7, primals_5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = extern_kernels.convolution(buf7, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 13, 13), (10816, 1, 832, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(43264)](buf9, primals_7, 43264, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 30, 30), (28800, 1, 960, 32)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_7[grid(115200)](buf11, primals_9, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 64, 64), (16384, 1, 256, 4)) buf13 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_sigmoid_8[grid(16, 4096)](buf12, primals_11, buf13, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del buf12 del primals_11 return buf13, primals_3, buf0, buf1, buf2, buf3, reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf7, buf9, buf11, buf13, buf14 class DecoderNew(nn.Module): """ VAE decoder """ def __init__(self, 
img_channels, latent_size): super(DecoderNew, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.deconv1.weight primals_5 = self.deconv1.bias primals_6 = self.deconv2.weight primals_7 = self.deconv2.bias primals_8 = self.deconv3.weight primals_9 = self.deconv3.bias primals_10 = self.deconv4.weight primals_11 = self.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
benedictquartey/softgym_wm
Decoder
false
12170
[ "BSD-3-Clause" ]
0
0aef75fed207b11029f6052c656a679c105b4677
https://github.com/benedictquartey/softgym_wm/tree/0aef75fed207b11029f6052c656a679c105b4677
BehlerAngular
import torch from torch import nn as nn class BehlerAngular(nn.Module): """ Compute Behler type angular contribution of the angle spanned by three atoms: :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta` Sets of zetas with lambdas of -1 and +1 are generated automatically. Args: zetas (set of int): Set of exponents used to compute angular Behler term (default={1}) """ def __init__(self, zetas={1}): super(BehlerAngular, self).__init__() self.zetas = zetas def forward(self, cos_theta): """ Args: cos_theta (torch.Tensor): Cosines between all pairs of neighbors of the central atom. Returns: torch.Tensor: Tensor containing values of the angular filters. """ angular_pos = [(2 ** (1 - zeta) * ((1.0 - cos_theta) ** zeta). unsqueeze(-1)) for zeta in self.zetas] angular_neg = [(2 ** (1 - zeta) * ((1.0 + cos_theta) ** zeta). unsqueeze(-1)) for zeta in self.zetas] angular_all = angular_pos + angular_neg return torch.cat(angular_all, -1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 1.0 tmp7 = tmp6 - tmp5 tmp8 = tmp7 * tmp6 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp14 = tl.load(in_ptr0 + x1, tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp14 + tmp6 tmp16 = tmp15 * tmp6 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp11, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp10, tmp18) tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class BehlerAngularNew(nn.Module): """ Compute Behler type angular contribution of the angle spanned by three atoms: :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta` Sets of zetas with lambdas of -1 and +1 are generated automatically. Args: zetas (set of int): Set of exponents used to compute angular Behler term (default={1}) """ def __init__(self, zetas={1}): super(BehlerAngularNew, self).__init__() self.zetas = zetas def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
blindcharzard/AttnSchNet
BehlerAngular
false
12,171
[ "MIT" ]
0
297bd130086459be6b732d68377193e244536bfc
https://github.com/blindcharzard/AttnSchNet/tree/297bd130086459be6b732d68377193e244536bfc
MHSA
import torch import torch.utils.data import torch.nn as nn class MHSA(nn.Module): def __init__(self, n_dims, width=14, height=14, heads=4): super(MHSA, self).__init__() self.heads = heads self.query = nn.Conv2d(n_dims, n_dims, kernel_size=1) self.key = nn.Conv2d(n_dims, n_dims, kernel_size=1) self.value = nn.Conv2d(n_dims, n_dims, kernel_size=1) self.rel_h = nn.Parameter(torch.randn([1, heads, n_dims // heads, 1, int(height)]), requires_grad=True) self.rel_w = nn.Parameter(torch.randn([1, heads, n_dims // heads, int(width), 1]), requires_grad=True) self.softmax = nn.Softmax(dim=-1) def forward(self, x): n_batch, C, width, height = x.size() q = self.query(x).view(n_batch, self.heads, C // self.heads, -1) k = self.key(x).view(n_batch, self.heads, C // self.heads, -1) v = self.value(x).view(n_batch, self.heads, C // self.heads, -1) content_content = torch.matmul(q.permute(0, 1, 3, 2), k) _c1, _c2, c3, _c4 = content_content.size() content_position = (self.rel_h + self.rel_w).view(1, self.heads, C // self.heads, -1).permute(0, 1, 3, 2) content_position = torch.matmul(content_position, q) content_position = (content_position if content_content.shape == content_position.shape else content_position[:, :, :c3]) assert content_content.shape == content_position.shape energy = content_content + content_position attention = self.softmax(energy) out = torch.matmul(v, attention.permute(0, 1, 3, 2)) out = out.view(n_batch, C, width, height) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_dims': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 196 x1 = xindex // 196 % 4 x3 = xindex % 784 x4 = xindex tmp0 = tl.load(in_ptr0 + (14 * x1 + x0 % 14), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3 // 14, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_per_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r2 + 16 * x0 + 3136 * x1), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp8 / tmp12 tl.store(out_ptr2 + (r2 + 16 * x3), tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4, 1, 1, 14), (56, 14, 14, 14, 1)) assert_size_stride(primals_9, (1, 4, 1, 14, 1), (56, 14, 14, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(primals_1, primals_4, 
stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf4, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 16, 1), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 196, 1), (784, 196, 1, 1), torch. float32) triton_poi_fused_clone_1[grid(3136)](primals_8, primals_9, buf6, 3136, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 del primals_9 buf7 = empty_strided_cuda((16, 196, 16), (3136, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 196, 1), (196, 1, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (16, 16, 1), 0), out=buf7) buf10 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) triton_per_fused__softmax_add_2[grid(256)](buf5, buf7, buf10, 256, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf5 del buf7 buf11 = buf3 del buf3 triton_poi_fused_convolution_0[grid(256)](buf11, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf12 = empty_strided_cuda((16, 1, 16), (16, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 16), (16, 0, 1 ), 0), reinterpret_tensor(buf10, (16, 16, 16), (256, 1, 16), 0), out=buf12) return (reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, primals_4, primals_6, buf1, buf10, reinterpret_tensor(buf11, (16, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf6, (16, 1, 196), (196, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)) class MHSANew(nn.Module): def __init__(self, n_dims, width=14, height=14, heads=4): super(MHSANew, self).__init__() self.heads = heads self.query = nn.Conv2d(n_dims, n_dims, kernel_size=1) self.key = nn.Conv2d(n_dims, n_dims, kernel_size=1) self.value = nn.Conv2d(n_dims, n_dims, kernel_size=1) self.rel_h = nn.Parameter(torch.randn([1, heads, n_dims // heads, 1, int(height)]), requires_grad=True) self.rel_w = nn.Parameter(torch.randn([1, heads, n_dims // heads, int(width), 1]), requires_grad=True) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0): primals_8 = self.rel_h primals_9 = self.rel_w primals_2 = self.query.weight primals_3 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
binghuiwu98/discriminatory-yolov5
MHSA
false
12,172
[ "Apache-2.0" ]
0
831bfdb8e0df38e247a72ca029ee3301fc14a311
https://github.com/binghuiwu98/discriminatory-yolov5/tree/831bfdb8e0df38e247a72ca029ee3301fc14a311
LowRankEncoderLayer
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.fc_u = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_v = nn.Linear(int(d_model / 4), d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, _len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs_v(self.w_qs_u(q)).view(sz_b, len_q, n_head, d_k) k = self.w_ks_v(self.w_ks_u(k)).view(sz_b, len_k, n_head, d_k) v = self.w_vs_v(self.w_vs_u(v)).view(sz_b, len_k, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_v(self.fc_u(q))) q += residual q = self.layer_norm(q) return q, attn class LowRankPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_v = nn.Linear(int(d_in / 4), d_hid) self.w_2_u = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_v = nn.Linear(int(d_in / 4), d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2_v(self.w_2_u(F.relu(self.w_1_v(self.w_1_u(x))))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankEncoderLayer(nn.Module): """ Compose with two layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankEncoderLayer, self).__init__() self.slf_attn = LowRankMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = LowRankPositionwiseFeedForward(d_model, d_inner, dropout=dropout) @autocast() def forward(self, enc_input, slf_attn_mask=None): enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask) enc_output = self.pos_ffn(enc_output) return enc_output, enc_slf_attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 
4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last').to(tl.float32) tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_t_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .float32) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 + tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp9.to(tl.float32) tmp12 = tmp10 + tmp11 tmp13 = tmp8 + tmp12 tmp15 = tmp14.to(tl.float32) tmp17 = tmp15 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused__to_copy_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp5 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp14.to(tl.float32) tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tmp3.to(tl.float32) tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (4, 1), (1, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (1, 4), (4, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (1, 4), (4, 1)) assert_size_stride(primals_16, (4, 1), (1, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) 
triton_poi_fused__to_copy_1[grid(16)](primals_2, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_3, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), out=buf4) buf5 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_4, buf5, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_5, buf7, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf6, reinterpret_tensor(buf7, (4, 16), (1, 4), 0 ), out=buf8) buf9 = buf5 del buf5 triton_poi_fused__to_copy_1[grid(16)](primals_6, buf9, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_6 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), out=buf10) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_7, buf11, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_7 buf12 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf10, reinterpret_tensor(buf11, (4, 16), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf4, buf13, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_clone_3[grid(64, 4)](buf8, buf14, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf15 = reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0) del buf8 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), out=buf15 ) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf15, buf16, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf18 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf15 triton_poi_fused__softmax__to_copy_5[grid(256)](buf16, buf17, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf16 buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf12, buf19, 256, XBLOCK=256, num_warps=4, num_stages=1) buf20 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0) del buf12 extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20 ) buf21 = reinterpret_tensor(buf9, (16, 1), (1, 16), 0) del buf9 triton_poi_fused__to_copy_1[grid(16)](primals_8, buf21, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_8 buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf20, buf22, 256, XBLOCK=256, num_warps=4, 
num_stages=1) del buf20 buf23 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0), buf21, out=buf23) buf24 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_9, buf24, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_9 buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf23, buf24, out=buf25) buf26 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf27 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(16)](buf25, primals_1, buf26, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_add_native_layer_norm_9[grid(64)](buf25, primals_1, buf26, buf27, primals_10, primals_11, buf28, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_12, buf29, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_12 buf31 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf29, out=buf31) buf32 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_13, buf32, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_13 buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf31, buf32, out=buf33) buf34 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_15, buf34, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_15 buf35 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0) del buf33 buf43 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_10[grid(64)](buf35, primals_14, buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf36 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf35, (16, 4), (4, 1), 0), buf34, out=buf36) buf37 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_16, buf37, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_16 buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf36, buf37, out=buf38) buf39 = buf28 del buf28 triton_poi_fused_add_11[grid(64)](buf39, buf38, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf38 del primals_17 buf40 = buf27 del buf27 buf41 = buf26 del buf26 triton_poi_fused_native_layer_norm_12[grid(16)](buf39, buf40, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_13[grid(64)](buf39, buf40, buf41, primals_18, primals_19, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf40 del buf41 del primals_19 return buf42, buf17, primals_1, primals_10, primals_18, reinterpret_tensor( buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), buf2, reinterpret_tensor(buf7, (4, 16), (1, 4), 0 ), buf6, reinterpret_tensor(buf11, (4, 16), (1, 4), 0 ), buf10, buf17, reinterpret_tensor(buf22, (16, 16), (16, 1), 0 ), buf23, buf25, reinterpret_tensor(buf30, (16, 4), (4, 1), 0 ), buf31, reinterpret_tensor(buf35, (16, 4), (4, 1), 0 ), buf36, buf39, reinterpret_tensor(buf37, (4, 1), (1, 1), 0 ), reinterpret_tensor(buf34, (1, 4), (4, 1), 0 ), 
buf43, reinterpret_tensor(buf32, (4, 1), (1, 1), 0 ), reinterpret_tensor(buf29, (1, 4), (4, 1), 0), reinterpret_tensor( buf24, (4, 1), (1, 1), 0), reinterpret_tensor(buf21, (1, 16), (16, 1), 0), reinterpret_tensor(buf18, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.fc_u = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_v = nn.Linear(int(d_model / 4), d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, _len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs_v(self.w_qs_u(q)).view(sz_b, len_q, n_head, d_k) k = self.w_ks_v(self.w_ks_u(k)).view(sz_b, len_k, n_head, d_k) v = self.w_vs_v(self.w_vs_u(v)).view(sz_b, len_k, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_v(self.fc_u(q))) q += residual q = self.layer_norm(q) return q, attn class LowRankPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_v = nn.Linear(int(d_in / 4), d_hid) self.w_2_u = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_v = nn.Linear(int(d_in / 4), d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2_v(self.w_2_u(F.relu(self.w_1_v(self.w_1_u(x))))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankEncoderLayerNew(nn.Module): """ Compose with two layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankEncoderLayerNew, self).__init__() self.slf_attn = LowRankMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = LowRankPositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, input_0): primals_2 = self.slf_attn.w_qs_u.weight 
primals_3 = self.slf_attn.w_qs_v.weight primals_4 = self.slf_attn.w_ks_u.weight primals_5 = self.slf_attn.w_ks_v.weight primals_6 = self.slf_attn.w_vs_u.weight primals_7 = self.slf_attn.w_vs_v.weight primals_8 = self.slf_attn.fc_u.weight primals_9 = self.slf_attn.fc_v.weight primals_10 = self.slf_attn.layer_norm.weight primals_11 = self.slf_attn.layer_norm.bias primals_12 = self.pos_ffn.w_1_u.weight primals_13 = self.pos_ffn.w_1_v.weight primals_14 = self.pos_ffn.w_1_v.bias primals_15 = self.pos_ffn.w_2_u.weight primals_16 = self.pos_ffn.w_2_v.weight primals_17 = self.pos_ffn.w_2_v.bias primals_18 = self.pos_ffn.layer_norm.weight primals_19 = self.pos_ffn.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0], output[1]
bahducoup/factorized_training
LowRankEncoderLayer
false
12,173
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
MultiHeadedAttention
import math import torch import torch.nn as nn import torch.nn.functional as F class MultiHeadedAttention(nn.Module): def __init__(self, num_head, d_model, dropout=0.1): super(MultiHeadedAttention, self).__init__() assert d_model % num_head == 0 self.d_k = d_model // num_head self.h = num_head self.linear_key = nn.Linear(d_model, d_model) self.linear_value = nn.Linear(d_model, d_model) self.linear_query = nn.Linear(d_model, d_model) self.linear_out = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def attention(self, query, key, value, mask, dropout=None): d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def forward(self, query, key, value, mask): nbatches = query.size(0) query = self.linear_query(query).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) key = self.linear_key(key).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) value = self.linear_value(value).view(nbatches, -1, self.h, self.d_k ).transpose(1, 2) mask = mask.unsqueeze(1) x, _attn = self.attention(query, key, value, mask, dropout=self.dropout ) x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k ) return self.linear_out(x) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_head': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -1000000000.0 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x3, tmp20, xmask) tl.store(out_ptr1 + x3, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x5, xmask) tmp6 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -1000000000.0 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + x5, tmp10, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = 
reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_12 return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadedAttentionNew(nn.Module): def __init__(self, num_head, d_model, dropout=0.1): super(MultiHeadedAttentionNew, self).__init__() assert d_model % num_head == 0 self.d_k = d_model // num_head self.h = num_head self.linear_key = nn.Linear(d_model, d_model) self.linear_value = nn.Linear(d_model, d_model) self.linear_query = nn.Linear(d_model, d_model) self.linear_out = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def attention(self, query, key, value, mask, dropout=None): d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.linear_key.weight primals_3 = self.linear_key.bias primals_4 = self.linear_value.weight primals_5 = self.linear_value.bias primals_7 = self.linear_query.weight primals_8 = self.linear_query.bias primals_11 = self.linear_out.weight primals_12 = self.linear_out.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
bekirufuk/pointer_summarizer
MultiHeadedAttention
false
12,174
[ "Apache-2.0" ]
0
8fc9726f9337b26339848d896a09e7e8f9456bcc
https://github.com/bekirufuk/pointer_summarizer/tree/8fc9726f9337b26339848d896a09e7e8f9456bcc
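The record above pairs a standard multi-head attention module with its Inductor-generated Triton kernels; the fused eq / masked_fill / softmax kernels reproduce the masked scaled dot-product attention of the reference attention method. A minimal eager sketch of that computation (shapes follow the record's 4x4x4 inputs; the tensor names are illustrative, not taken from the dataset):

import math
import torch
import torch.nn.functional as F

q = torch.rand(4, 4, 4, 4)                  # (batch, heads, len_q, d_k)
k = torch.rand(4, 4, 4, 4)
v = torch.rand(4, 4, 4, 4)
mask = torch.randint(0, 2, (4, 1, 4, 4))    # broadcast over heads; 0 marks masked positions

scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1))
scores = scores.masked_fill(mask == 0, -1000000000.0)   # same fill value as the record
p_attn = F.softmax(scores, dim=-1)
out = torch.matmul(p_attn, v)               # (4, 4, 4, 4)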
Aggregate
import torch from torch import nn as nn class Aggregate(nn.Module): """Pooling layer based on sum or average with optional masking. Args: axis (int): axis along which pooling is done. mean (bool, optional): if True, use average instead for sum pooling. keepdim (bool, optional): whether the output tensor has dim retained or not. """ def __init__(self, axis, mean=False, keepdim=True): super(Aggregate, self).__init__() self.average = mean self.axis = axis self.keepdim = keepdim def forward(self, input, mask=None): """Compute layer output. Args: input (torch.Tensor): input data. mask (torch.Tensor, optional): mask to be applied; e.g. neighbors mask. Returns: torch.Tensor: layer output. """ if mask is not None: input = input * mask[..., None] y = torch.sum(input, self.axis) if self.average: if mask is not None: N = torch.sum(mask, self.axis, keepdim=self.keepdim) N = torch.max(N, other=torch.ones_like(N)) else: N = input.size(self.axis) y = y / N return y def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'axis': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class AggregateNew(nn.Module): """Pooling layer based on sum or average with optional masking. Args: axis (int): axis along which pooling is done. mean (bool, optional): if True, use average instead for sum pooling. keepdim (bool, optional): whether the output tensor has dim retained or not. """ def __init__(self, axis, mean=False, keepdim=True): super(AggregateNew, self).__init__() self.average = mean self.axis = axis self.keepdim = keepdim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
blindcharzard/AttnSchNet
Aggregate
false
12,175
[ "MIT" ]
0
297bd130086459be6b732d68377193e244536bfc
https://github.com/blindcharzard/AttnSchNet/tree/297bd130086459be6b732d68377193e244536bfc
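With the record's default arguments (mean=False, no mask), Aggregate collapses to a plain sum over the chosen axis, which is why the optimised code is a single fused sum kernel over the last dimension. An eager sketch of the two modes, for illustration only:

import torch

x = torch.rand(4, 4, 4, 4, 4)
y_sum = x.sum(dim=4)                 # Aggregate(axis=4): shape (4, 4, 4, 4)
y_mean = x.sum(dim=4) / x.size(4)    # Aggregate(axis=4, mean=True) without a mask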
GVPDropout
import torch from torch import nn class GVPDropout(nn.Module): """ Separate dropout for scalars and vectors. """ def __init__(self, rate): super().__init__() self.vector_dropout = nn.Dropout2d(rate) self.feat_dropout = nn.Dropout(rate) def forward(self, feats, vectors): return self.feat_dropout(feats), self.vector_dropout(vectors) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'rate': 0.5}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, arg1_1 class GVPDropoutNew(nn.Module): """ Separate dropout for scalars and vectors. """ def __init__(self, rate): super().__init__() self.vector_dropout = nn.Dropout2d(rate) self.feat_dropout = nn.Dropout(rate) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
blazingsiyan/geometric-vector-perceptron
GVPDropout
false
12,176
[ "MIT" ]
0
eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
https://github.com/blazingsiyan/geometric-vector-perceptron/tree/eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
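GVPDropout simply routes scalar features through nn.Dropout and vector features through nn.Dropout2d. The generated code contains only a copy kernel and returns the second input unchanged, which is consistent with the module having been traced in inference mode, where both dropouts are identities; that reading is an inference from the kernel, not something stated in the record. A small eager check of the identity behaviour:

import torch
from torch import nn

feat_drop, vec_drop = nn.Dropout(0.5), nn.Dropout2d(0.5)
feat_drop.eval(); vec_drop.eval()           # eval mode: dropout becomes a no-op
feats, vectors = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.equal(feat_drop(feats), feats)
assert torch.equal(vec_drop(vectors), vectors)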
ResidualBlock
import torch import torch.nn as nn def conv3x3(in_ch, out_ch, stride=1): """3x3 convolution with padding.""" return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1) class ResidualBlock(nn.Module): """Simple residual block with two 3x3 convolutions. Args: in_ch (int): number of input channels out_ch (int): number of output channels """ def __init__(self, in_ch, out_ch): super().__init__() self.conv1 = conv3x3(in_ch, out_ch) self.leaky_relu = nn.LeakyReLU(inplace=True) self.conv2 = conv3x3(out_ch, out_ch) def forward(self, x): identity = x out = self.conv1(x) out = self.leaky_relu(out) out = self.conv2(out) out = self.leaky_relu(out) out = out + identity return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tmp7 + tmp8 tmp10 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1[grid (256)](buf2, primals_5, primals_1, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_5 return buf3, primals_1, primals_2, primals_4, buf1, buf4 def conv3x3(in_ch, out_ch, stride=1): """3x3 convolution with padding.""" return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1) class ResidualBlockNew(nn.Module): """Simple residual block with two 3x3 convolutions. 
Args: in_ch (int): number of input channels out_ch (int): number of output channels """ def __init__(self, in_ch, out_ch): super().__init__() self.conv1 = conv3x3(in_ch, out_ch) self.leaky_relu = nn.LeakyReLU(inplace=True) self.conv2 = conv3x3(out_ch, out_ch) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
blakecheng/CompressAI
ResidualBlock
false
12,177
[ "Apache-2.0" ]
0
7a919e509bafacc99055dd88fc20315f3b9fc1fc
https://github.com/blakecheng/CompressAI/tree/7a919e509bafacc99055dd88fc20315f3b9fc1fc
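The ResidualBlock record leaves the two 3x3 convolutions to the extern convolution kernel and fuses only the bias add, LeakyReLU, and residual add into Triton. The eager computation being reproduced, sketched with freshly initialised layers rather than the record's weights:

import torch
from torch import nn

conv1 = nn.Conv2d(4, 4, kernel_size=3, padding=1)
conv2 = nn.Conv2d(4, 4, kernel_size=3, padding=1)
act = nn.LeakyReLU(inplace=True)

x = torch.rand(4, 4, 4, 4)
out = act(conv1(x))
out = act(conv2(out))
out = out + x            # residual connection; padding=1 keeps the spatial size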
GVPLayerNorm
import torch from torch import nn class GVPLayerNorm(nn.Module): """ Normal layer norm for scalars, nontrainable norm for vectors. """ def __init__(self, feats_h_size, eps=1e-08): super().__init__() self.eps = eps self.feat_norm = nn.LayerNorm(feats_h_size) def forward(self, feats, vectors): vector_norm = vectors.norm(dim=(-1, -2), keepdim=True) normed_feats = self.feat_norm(feats) normed_vectors = vectors / (vector_norm + self.eps) return normed_feats, normed_vectors def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'feats_h_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = 1e-08 tmp8 = tmp6 + tmp7 tmp9 = tmp0 / tmp8 tl.store(out_ptr1 + (r1 + 16 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_0[grid(16)](primals_1, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) 
triton_poi_fused_native_layer_norm_1[grid(64)](primals_4, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](primals_4, buf1, buf2, primals_2, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del primals_2 del primals_3 return buf3, buf4, primals_4 class GVPLayerNormNew(nn.Module): """ Normal layer norm for scalars, nontrainable norm for vectors. """ def __init__(self, feats_h_size, eps=1e-08): super().__init__() self.eps = eps self.feat_norm = nn.LayerNorm(feats_h_size) def forward(self, input_0, input_1): primals_2 = self.feat_norm.weight primals_3 = self.feat_norm.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
blazingsiyan/geometric-vector-perceptron
GVPLayerNorm
false
12,178
[ "MIT" ]
0
eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
https://github.com/blazingsiyan/geometric-vector-perceptron/tree/eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
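GVPLayerNorm combines a learned LayerNorm on scalar features with a non-trainable L2 normalisation of vector features over their last two dimensions; the first Triton kernel computes that vector norm and the division in one pass. The vector branch in eager form, as a sketch:

import torch

vectors = torch.rand(4, 4, 4, 4)
norm = vectors.norm(dim=(-1, -2), keepdim=True)   # one norm per leading (4, 4) slice
normed_vectors = vectors / (norm + 1e-08)         # eps matches the record's default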
LowRankDecoderLayer
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.fc_u = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_v = nn.Linear(int(d_model / 4), d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, _len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs_v(self.w_qs_u(q)).view(sz_b, len_q, n_head, d_k) k = self.w_ks_v(self.w_ks_u(k)).view(sz_b, len_k, n_head, d_k) v = self.w_vs_v(self.w_vs_u(v)).view(sz_b, len_k, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_v(self.fc_u(q))) q += residual q = self.layer_norm(q) return q, attn class LowRankPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_v = nn.Linear(int(d_in / 4), d_hid) self.w_2_u = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_v = nn.Linear(int(d_in / 4), d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2_v(self.w_2_u(F.relu(self.w_1_v(self.w_1_u(x))))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankDecoderLayer(nn.Module): """ Compose with three layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankDecoderLayer, self).__init__() self.slf_attn = LowRankMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.enc_attn = LowRankMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = LowRankPositionwiseFeedForward(d_model, d_inner, dropout=dropout) @autocast() def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None): dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input, dec_input, mask=slf_attn_mask) dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output, enc_output, 
mask=dec_enc_attn_mask) dec_output = self.pos_ffn(dec_output) return dec_output, dec_slf_attn, dec_enc_attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last').to(tl.float32) tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_t_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .float32) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 + tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp9.to(tl.float32) tmp12 = tmp10 + tmp11 tmp13 = tmp8 + tmp12 tmp15 = tmp14.to(tl.float32) tmp17 = tmp15 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused__to_copy_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp5 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp14.to(tl.float32) tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32) tmp2 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused__to_copy_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tmp3.to(tl.float32) tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_native_layer_norm_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, 
primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (4, 1), (1, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (16, 4), (4, 1)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (16, 4), (4, 1)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (16, 4), (4, 1)) assert_size_stride(primals_19, (1, 16), (16, 1)) assert_size_stride(primals_20, (4, 1), (1, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (1, 4), (4, 1)) assert_size_stride(primals_24, (4, 1), (1, 1)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (1, 4), (4, 1)) assert_size_stride(primals_27, (4, 1), (1, 1)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) assert_size_stride(primals_30, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_2, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_3, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), out=buf4) buf5 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_4, buf5, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_5, buf7, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf6, reinterpret_tensor(buf7, (4, 16), (1, 4), 0 ), out=buf8) buf9 = buf5 del buf5 triton_poi_fused__to_copy_1[grid(16)](primals_6, buf9, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_6 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), out=buf10) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_7, buf11, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_7 buf12 = empty_strided_cuda((16, 
16), (16, 1), torch.float16) extern_kernels.mm(buf10, reinterpret_tensor(buf11, (4, 16), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf4, buf13, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_clone_3[grid(64, 4)](buf8, buf14, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf15 = reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0) del buf8 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), out=buf15 ) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf15, buf16, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf18 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf15 triton_poi_fused__softmax__to_copy_5[grid(256)](buf16, buf17, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf12, buf19, 256, XBLOCK=256, num_warps=4, num_stages=1) buf20 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0) del buf12 extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20 ) buf21 = reinterpret_tensor(buf9, (16, 1), (1, 16), 0) del buf9 triton_poi_fused__to_copy_1[grid(16)](primals_8, buf21, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_8 buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf20, buf22, 256, XBLOCK=256, num_warps=4, num_stages=1) buf23 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0), buf21, out=buf23) buf24 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_9, buf24, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_9 buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf23, buf24, out=buf25) buf26 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf27 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(16)](buf25, primals_1, buf26, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_add_native_layer_norm_9[grid(64)](buf25, primals_1, buf26, buf27, primals_10, primals_11, buf28, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf29 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_13, buf29, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_13 buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf29, out=buf31) buf32 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_14, buf32, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_14 buf33 = reinterpret_tensor(buf20, (16, 16), (16, 1), 0) del buf20 extern_kernels.mm(buf31, reinterpret_tensor(buf32, (4, 16), (1, 4), 0), out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) 
triton_poi_fused__to_copy_0[grid(64)](primals_12, buf34, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_12 buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_15, buf35, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_15 buf36 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(buf35, (4, 4), (1, 4), 0), out=buf36) buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_16, buf37, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_16 buf38 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf36, reinterpret_tensor(buf37, (4, 16), (1, 4), 0), out=buf38) buf39 = buf35 del buf35 triton_poi_fused__to_copy_1[grid(16)](primals_17, buf39, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_17 buf40 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(buf39, (4, 4), (1, 4), 0), out=buf40) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_18, buf41, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_18 buf42 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf40, reinterpret_tensor(buf41, (4, 16), (1, 4), 0), out=buf42) buf43 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf33, buf43, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf44 = reinterpret_tensor(buf33, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf33 triton_poi_fused_clone_3[grid(64, 4)](buf38, buf44, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf45 = reinterpret_tensor(buf38, (16, 4, 4), (16, 4, 1), 0) del buf38 extern_kernels.bmm(reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf44, (16, 4, 4), (16, 4, 1), 0), out=buf45 ) buf46 = buf16 del buf16 triton_poi_fused__softmax_4[grid(256)](buf45, buf46, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf47 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf48 = reinterpret_tensor(buf45, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf45 triton_poi_fused__softmax__to_copy_5[grid(256)](buf46, buf47, buf48, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf46 buf49 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf42, buf49, 256, XBLOCK=256, num_warps=4, num_stages=1) buf50 = reinterpret_tensor(buf42, (16, 4, 4), (16, 4, 1), 0) del buf42 extern_kernels.bmm(reinterpret_tensor(buf48, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0), out=buf50 ) buf51 = reinterpret_tensor(buf39, (16, 1), (1, 16), 0) del buf39 triton_poi_fused__to_copy_1[grid(16)](primals_19, buf51, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_19 buf52 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_6[grid(256)](buf50, buf52, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf50 buf53 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf52, (16, 16), (16, 1), 0), buf51, out=buf53) buf54 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_20, buf54, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_20 buf55 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf53, buf54, out=buf55) buf56 = buf28 del 
buf28 triton_poi_fused_add_10[grid(64)](buf56, buf55, 64, XBLOCK=64, num_warps=1, num_stages=1) buf57 = buf27 del buf27 buf58 = buf26 del buf26 triton_poi_fused_native_layer_norm_11[grid(16)](buf56, buf57, buf58, 16, XBLOCK=16, num_warps=1, num_stages=1) buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf61 = reinterpret_tensor(buf55, (4, 4, 4), (16, 4, 1), 0) del buf55 triton_poi_fused__to_copy_native_layer_norm_12[grid(64)](buf56, buf57, buf58, primals_21, primals_22, buf59, buf61, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_22 buf60 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_23, buf60, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_23 buf62 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf61, (16, 4), (4, 1), 0), buf60, out=buf62) buf63 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_24, buf63, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_24 buf64 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf62, buf63, out=buf64) buf65 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_26, buf65, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_26 buf66 = reinterpret_tensor(buf64, (4, 4, 4), (16, 4, 1), 0) del buf64 buf74 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_13[grid(64)](buf66, primals_25, buf74, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_25 buf67 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf66, (16, 4), (4, 1), 0), buf65, out=buf67) buf68 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_7[grid(4)](primals_27, buf68, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_27 buf69 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(buf67, buf68, out=buf69) buf70 = buf59 del buf59 triton_poi_fused_add_14[grid(64)](buf70, buf69, primals_28, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf69 del primals_28 buf71 = buf58 del buf58 buf72 = buf57 del buf57 triton_poi_fused_native_layer_norm_11[grid(16)](buf70, buf71, buf72, 16, XBLOCK=16, num_warps=1, num_stages=1) buf73 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_15[grid(64)](buf70, buf71, buf72, primals_29, primals_30, buf73, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf71 del buf72 del primals_30 return (buf73, buf17, buf47, primals_1, primals_10, primals_21, primals_29, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0), buf2, reinterpret_tensor(buf7, (4, 16), (1, 4), 0), buf6, reinterpret_tensor(buf11, (4, 16), (1, 4), 0), buf10, buf17, reinterpret_tensor(buf22, (16, 16), (16, 1), 0), buf23, buf25, reinterpret_tensor(buf30, (16, 4), (4, 1), 0), reinterpret_tensor( buf32, (4, 16), (1, 4), 0), buf31, reinterpret_tensor(buf34, (16, 4 ), (4, 1), 0), reinterpret_tensor(buf37, (4, 16), (1, 4), 0), buf36, reinterpret_tensor(buf41, (4, 16), (1, 4), 0), buf40, buf47, reinterpret_tensor(buf52, (16, 16), (16, 1), 0), buf53, buf56, reinterpret_tensor(buf61, (16, 4), (4, 1), 0), buf62, reinterpret_tensor(buf66, (16, 4), (4, 1), 0), buf67, buf70, reinterpret_tensor(buf68, (4, 1), (1, 1), 0), reinterpret_tensor( buf65, (1, 4), (4, 1), 0), buf74, reinterpret_tensor(buf63, (4, 1), (1, 1), 0), reinterpret_tensor(buf60, (1, 4), (4, 1), 0), 
reinterpret_tensor(buf54, (4, 1), (1, 1), 0), reinterpret_tensor( buf51, (1, 16), (16, 1), 0), reinterpret_tensor(buf48, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf49, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf43, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf44, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf29, (4, 4), (4, 1), 0), reinterpret_tensor( buf24, (4, 1), (1, 1), 0), reinterpret_tensor(buf21, (1, 16), (16, 1), 0), reinterpret_tensor(buf18, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0)) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_u = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_v = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.fc_u = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_v = nn.Linear(int(d_model / 4), d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, _len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs_v(self.w_qs_u(q)).view(sz_b, len_q, n_head, d_k) k = self.w_ks_v(self.w_ks_u(k)).view(sz_b, len_k, n_head, d_k) v = self.w_vs_v(self.w_vs_u(v)).view(sz_b, len_k, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc_v(self.fc_u(q))) q += residual q = self.layer_norm(q) return q, attn class LowRankPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_v = nn.Linear(int(d_in / 4), d_hid) self.w_2_u = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_v = nn.Linear(int(d_in / 4), d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2_v(self.w_2_u(F.relu(self.w_1_v(self.w_1_u(x))))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankDecoderLayerNew(nn.Module): """ Compose with three layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankDecoderLayerNew, 
self).__init__() self.slf_attn = LowRankMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.enc_attn = LowRankMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = LowRankPositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, input_0, input_1): primals_2 = self.slf_attn.w_qs_u.weight primals_3 = self.slf_attn.w_qs_v.weight primals_4 = self.slf_attn.w_ks_u.weight primals_5 = self.slf_attn.w_ks_v.weight primals_6 = self.slf_attn.w_vs_u.weight primals_7 = self.slf_attn.w_vs_v.weight primals_8 = self.slf_attn.fc_u.weight primals_9 = self.slf_attn.fc_v.weight primals_10 = self.slf_attn.layer_norm.weight primals_11 = self.slf_attn.layer_norm.bias primals_13 = self.enc_attn.w_qs_u.weight primals_14 = self.enc_attn.w_qs_v.weight primals_15 = self.enc_attn.w_ks_u.weight primals_16 = self.enc_attn.w_ks_v.weight primals_17 = self.enc_attn.w_vs_u.weight primals_18 = self.enc_attn.w_vs_v.weight primals_19 = self.enc_attn.fc_u.weight primals_20 = self.enc_attn.fc_v.weight primals_21 = self.enc_attn.layer_norm.weight primals_22 = self.enc_attn.layer_norm.bias primals_23 = self.pos_ffn.w_1_u.weight primals_24 = self.pos_ffn.w_1_v.weight primals_25 = self.pos_ffn.w_1_v.bias primals_26 = self.pos_ffn.w_2_u.weight primals_27 = self.pos_ffn.w_2_v.weight primals_28 = self.pos_ffn.w_2_v.bias primals_29 = self.pos_ffn.layer_norm.weight primals_30 = self.pos_ffn.layer_norm.bias primals_1 = input_0 primals_12 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return output[0], output[1], output[2]
bahducoup/factorized_training
LowRankDecoderLayer
false
12,179
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
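This is the largest record here because a decoder layer chains self-attention, encoder-decoder attention, and a position-wise feed-forward block, each replacing its full linear projections with a pair of thin linear maps through a bottleneck one quarter of the width (the *_u / *_v weight pairs in the record), and the whole layer runs under autocast, which is where the float16 buffers and _to_copy kernels in the generated code come from. The factorisation in isolation, with illustrative sizes rather than the record's d_model=4:

import torch
from torch import nn

d_model, rank = 16, 4                       # rank = d_model // 4, mirroring the record's modules
w_u = nn.Linear(d_model, rank, bias=False)  # down-projection
w_v = nn.Linear(rank, d_model, bias=False)  # up-projection

x = torch.rand(2, 8, d_model)
q = w_v(w_u(x))                             # stands in for one square d_model x d_model projection

For a square projection the two factors hold 2 * d_model * rank parameters instead of d_model ** 2, which is the point of the factorisation.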
GroupedChannelNorm
import torch import torch.utils.data import torch import torch.nn as nn class GroupedChannelNorm(nn.Module): def __init__(self, num_groups): super().__init__() self.num_groups = num_groups def forward(self, x): shape = list(x.shape) new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups ] + shape[2:] x = x.view(*new_shape) mean = x.mean(dim=2, keepdim=True) std = x.std(dim=2, keepdim=True) x_norm = (x - mean) / (std + 1e-07) return x_norm.view(*shape) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-07 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + x3, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 1, 16, 4, 1), torch .float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), class GroupedChannelNormNew(nn.Module): def __init__(self, num_groups): super().__init__() self.num_groups = num_groups def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bomtorazek/contrastive-unpaired-translation
GroupedChannelNorm
false
12,180
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
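GroupedChannelNorm standardises each group of channels by that group's mean and unbiased standard deviation; with num_groups=1, as in the record's get_init_inputs, this is per-sample normalisation across all channels, which the single fused kernel computes directly (the division by 3.0 in the kernel is the unbiased n-1 divisor for 4 channels). An eager sketch of that special case:

import torch

x = torch.rand(4, 4, 4, 4)                 # (batch, channels, H, W), one group of 4 channels
mean = x.mean(dim=1, keepdim=True)
std = x.std(dim=1, keepdim=True)           # unbiased, i.e. divides by channels - 1 = 3
x_norm = (x - mean) / (std + 1e-07)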
MLPNet
import torch import torch.nn as nn import torch.nn.functional as F class MLPNet(nn.Module): def __init__(self): super(MLPNet, self).__init__() self.fc1 = nn.Linear(28 * 28, 500) self.fc2 = nn.Linear(500, 256) self.fc3 = nn.Linear(256, 10) def forward(self, x): x = x.view(-1, 28 * 28) x = F.leaky_relu(self.fc1(x)) x = F.leaky_relu(self.fc2(x)) x = self.fc3(x) return F.softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (500, 784), (784, 1)) assert_size_stride(primals_3, (500,), (1,)) assert_size_stride(primals_4, (256, 500), (500, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (10, 256), (256, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 500), (1, 784), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 500), (500, 1), torch.bool) buf2 = empty_strided_cuda((4, 500), (500, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(2000)](buf0, primals_3, buf1, buf2, 2000, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (500, 
256), ( 1, 500), 0), out=buf3) buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool) buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(1024)](buf3, primals_5, buf4, buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_7 buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__softmax_2[grid(4)](buf6, buf9, 4, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf6 return buf9, primals_1, buf1, buf2, buf4, buf5, buf9, primals_6, primals_4 class MLPNetNew(nn.Module): def __init__(self): super(MLPNetNew, self).__init__() self.fc1 = nn.Linear(28 * 28, 500) self.fc2 = nn.Linear(500, 256) self.fc3 = nn.Linear(256, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
bluebibi/flask_rest
MLPNet
false
12,181
[ "MIT" ]
0
9b1ee876060bca5d97459bb894c73530f66c4c15
https://github.com/bluebibi/flask_rest/tree/9b1ee876060bca5d97459bb894c73530f66c4c15
FusedLeakyReLU
import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): out = fused_leaky_relu(input, self.bias, self.negative_slope, self. scale) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_leaky_relu_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = 1.4142135623730951 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp9, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_leaky_relu_mul_0[grid(256)](primals_2, primals_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class FusedLeakyReLUNew(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1)) self.negative_slope = negative_slope self.scale = scale def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
bomtorazek/contrastive-unpaired-translation
FusedLeakyReLU
false
12,182
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
net
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn class net(nn.Module): def __init__(self, input_size, output_size): super(net, self).__init__() self.fc1 = nn.Linear(in_features=input_size, out_features=64) self.fc2 = nn.Linear(in_features=64, out_features=64) self.fc3 = nn.Linear(in_features=64, out_features=output_size) def forward(self, x): if isinstance(x, np.ndarray): x = torch.tensor(x, dtype=torch.float) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_3, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3, primals_5, buf5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 64), (64, 1), 0), primals_6, buf5, primals_4, buf6 class netNew(nn.Module): def __init__(self, input_size, output_size): super(netNew, self).__init__() self.fc1 = nn.Linear(in_features=input_size, out_features=64) self.fc2 = nn.Linear(in_features=64, out_features=64) self.fc3 = nn.Linear(in_features=64, out_features=output_size) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
brabeem/deep-reinforcement-learning
net
false
12,183
[ "MIT" ]
0
aff919545a1b6d9d44f5aaaa13b9981c888e7169
https://github.com/brabeem/deep-reinforcement-learning/tree/aff919545a1b6d9d44f5aaaa13b9981c888e7169
DecoderLayer
import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) q += residual q = self.layer_norm(q) return q, attn class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class DecoderLayer(nn.Module): """ Compose with three layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(DecoderLayer, self).__init__() self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout= dropout) @autocast() def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None): dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input, dec_input, mask=slf_attn_mask) dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output, enc_output, mask=dec_enc_attn_mask) dec_output = self.pos_ffn(dec_output) return dec_output, dec_slf_attn, dec_enc_attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last').to(tl.float32) tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax__to_copy_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .float32) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 + tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp9.to(tl.float32) tmp12 = tmp10 + tmp11 tmp13 = tmp8 + tmp12 tmp15 = tmp14.to(tl.float32) tmp17 = tmp15 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused__to_copy_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp5 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp14.to(tl.float32) tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32) tmp2 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 
tmp0.to(tl.float32) tmp3 = tmp1 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused__to_copy_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__to_copy_t_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_12(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 + tmp2 tmp4 = tmp3.to(tl.float32) tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_native_layer_norm_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = 
empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_2, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 16), (1, 4), 0), out=buf2) buf3 = buf1 del buf1 triton_poi_fused__to_copy_0[grid(64)](primals_3, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0), out=buf4) buf5 = buf3 del buf3 triton_poi_fused__to_copy_0[grid(64)](primals_4, buf5, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_4 buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 16), (1, 4), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_1[grid(256)](buf2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_clone_2[grid(64, 4)](buf4, buf8, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf9 triton_poi_fused__softmax__to_copy_4[grid(256)](buf10, buf11, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_5[grid(256)](buf6, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0) del buf6 extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), out=buf14 ) buf15 = reinterpret_tensor(buf5, (16, 4), (1, 16), 0) del buf5 triton_poi_fused__to_copy_0[grid(64)](primals_5, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_5[grid(256)](buf14, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf16, (16, 16), (16, 1), 0), buf15, out=buf17) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](buf17, primals_1, buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_add_native_layer_norm_7[grid(64)](buf17, primals_1, buf18, buf19, primals_6, primals_7, buf20, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_9, buf21, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_9 buf23 = 
reinterpret_tensor(buf14, (16, 16), (16, 1), 0) del buf14 extern_kernels.mm(reinterpret_tensor(buf22, (16, 4), (4, 1), 0), reinterpret_tensor(buf21, (4, 16), (1, 4), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_8, buf24, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_8 buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_10, buf25, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_10 buf26 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(buf25, (4, 16), (1, 4), 0), out=buf26) buf27 = buf25 del buf25 triton_poi_fused__to_copy_0[grid(64)](primals_11, buf27, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_11 buf28 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(buf27, (4, 16), (1, 4), 0), out=buf28) buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_1[grid(256)](buf23, buf29, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf30 = reinterpret_tensor(buf23, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf23 triton_poi_fused_clone_2[grid(64, 4)](buf26, buf30, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf31 = reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0) del buf26 extern_kernels.bmm(reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf30, (16, 4, 4), (16, 4, 1), 0), out=buf31 ) buf32 = buf10 del buf10 triton_poi_fused__softmax_3[grid(256)](buf31, buf32, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf34 = reinterpret_tensor(buf31, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf31 triton_poi_fused__softmax__to_copy_4[grid(256)](buf32, buf33, buf34, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf32 buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_5[grid(256)](buf28, buf35, 256, XBLOCK=128, num_warps=4, num_stages=1) buf36 = reinterpret_tensor(buf28, (16, 4, 4), (16, 4, 1), 0) del buf28 extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36 ) buf37 = reinterpret_tensor(buf27, (16, 4), (1, 16), 0) del buf27 triton_poi_fused__to_copy_0[grid(64)](primals_12, buf37, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_12 buf38 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_5[grid(256)](buf36, buf38, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf36 buf39 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf38, (16, 16), (16, 1), 0), buf37, out=buf39) buf40 = buf20 del buf20 triton_poi_fused_add_8[grid(64)](buf40, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) buf41 = buf19 del buf19 buf42 = buf18 del buf18 triton_poi_fused_native_layer_norm_9[grid(16)](buf40, buf41, buf42, 16, XBLOCK=16, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf44 = reinterpret_tensor(buf39, (4, 4, 4), (16, 4, 1), 0) del buf39 triton_poi_fused__to_copy_native_layer_norm_10[grid(64)](buf40, buf41, buf42, primals_13, primals_14, buf43, buf44, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_14 buf45 = empty_strided_cuda((4, 4), (1, 4), torch.float16) 
triton_poi_fused__to_copy_t_11[grid(16)](primals_15, buf45, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_15 buf46 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf44, (16, 4), (4, 1), 0), buf45, out=buf46) buf47 = reinterpret_tensor(buf46, (4, 4, 4), (16, 4, 1), 0) del buf46 buf54 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_12[grid(64)](buf47, primals_16, buf54, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf48 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_t_11[grid(16)](primals_17, buf48, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_17 buf49 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf47, (16, 4), (4, 1), 0), buf48, out=buf49) buf50 = buf43 del buf43 triton_poi_fused_add_13[grid(64)](buf50, buf49, primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf49 del primals_18 buf51 = buf42 del buf42 buf52 = buf41 del buf41 triton_poi_fused_native_layer_norm_9[grid(16)](buf50, buf51, buf52, 16, XBLOCK=16, num_warps=1, num_stages=1) buf53 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_14[grid(64)](buf50, buf51, buf52, primals_19, primals_20, buf53, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf51 del buf52 del primals_20 return (buf53, buf11, buf33, primals_1, primals_6, primals_13, primals_19, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf16, (16, 16), (16, 1), 0), buf17, reinterpret_tensor(buf21, (4, 16), (1, 4), 0), reinterpret_tensor( buf22, (16, 4), (4, 1), 0), reinterpret_tensor(buf24, (16, 4), (4, 1), 0), buf33, reinterpret_tensor(buf38, (16, 16), (16, 1), 0), buf40, reinterpret_tensor(buf44, (16, 4), (4, 1), 0), reinterpret_tensor(buf47, (16, 4), (4, 1), 0), buf50, reinterpret_tensor(buf48, (4, 4), (4, 1), 0), buf54, reinterpret_tensor(buf45, (4, 4), (4, 1), 0), reinterpret_tensor( buf37, (4, 16), (16, 1), 0), reinterpret_tensor(buf34, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf29, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf30, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (4, 16), (16, 1), 0), reinterpret_tensor( buf12, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0)) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) 
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) q += residual q = self.layer_norm(q) return q, attn class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class DecoderLayerNew(nn.Module): """ Compose with three layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(DecoderLayerNew, self).__init__() self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout= dropout) def forward(self, input_0, input_1): primals_2 = self.slf_attn.w_qs.weight primals_3 = self.slf_attn.w_ks.weight primals_4 = self.slf_attn.w_vs.weight primals_5 = self.slf_attn.fc.weight primals_6 = self.slf_attn.layer_norm.weight primals_7 = self.slf_attn.layer_norm.bias primals_9 = self.enc_attn.w_qs.weight primals_10 = self.enc_attn.w_ks.weight primals_11 = self.enc_attn.w_vs.weight primals_12 = self.enc_attn.fc.weight primals_13 = self.enc_attn.layer_norm.weight primals_14 = self.enc_attn.layer_norm.bias primals_15 = self.pos_ffn.w_1.weight primals_16 = self.pos_ffn.w_1.bias primals_17 = self.pos_ffn.w_2.weight primals_18 = self.pos_ffn.w_2.bias primals_19 = self.pos_ffn.layer_norm.weight primals_20 = self.pos_ffn.layer_norm.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0], output[1], output[2]
bahducoup/factorized_training
DecoderLayer
false
12,184
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
Normalize
import torch import torch.utils.data import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): def __init__(self, power=2): super(NormalizeNew, self).__init__() self.power = power def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bomtorazek/contrastive-unpaired-translation
Normalize
false
12,185
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
PoolingF
import torch import torch.utils.data import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out class PoolingF(nn.Module): def __init__(self): super(PoolingF, self).__init__() model = [nn.AdaptiveMaxPool2d(1)] self.model = nn.Sequential(*model) self.l2norm = Normalize(2) def forward(self, x): return self.l2norm(self.model(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_pow_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_div_pow_sum_1[grid(16)](buf0, buf1, 16, XBLOCK =16, num_warps=1, num_stages=1) del buf0 return buf1, class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out class PoolingFNew(nn.Module): def __init__(self): super(PoolingFNew, self).__init__() model = [nn.AdaptiveMaxPool2d(1)] self.model = nn.Sequential(*model) self.l2norm = Normalize(2) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bomtorazek/contrastive-unpaired-translation
PoolingF
false
12,186
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
Critic
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=24, fc2_units=48): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=1) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 28 x1 = xindex // 28 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 24, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (24 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 28, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-24 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 48 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (24, 4), (4, 1)) assert_size_stride(primals_2, (24,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (48, 28), (28, 1)) assert_size_stride(primals_6, (48,), (1,)) assert_size_stride(primals_7, (1, 48), (48, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 24), (24, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 28), (28, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(112)](buf0, primals_2, primals_4, buf1, 112, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 48), (48, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (28, 48), (1, 28), 0), out=buf2) buf3 = buf2 del buf2 
triton_poi_fused_relu_1[grid(192)](buf3, primals_6, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (48, 1), (1, 48), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 24), (24, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(96)](buf0, primals_2, buf6, 96, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=24, fc2_units=48): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
brabeem/deep-reinforcement-learning
Critic
false
12,187
[ "MIT" ]
0
aff919545a1b6d9d44f5aaaa13b9981c888e7169
https://github.com/brabeem/deep-reinforcement-learning/tree/aff919545a1b6d9d44f5aaaa13b9981c888e7169
Actor
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=24, fc2_units=48): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(Actor, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state): """Build an actor (policy) network that maps states -> actions.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return F.tanh(self.fc3(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 48 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (24, 4), (4, 1)) assert_size_stride(primals_2, (24,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (48, 24), (24, 1)) assert_size_stride(primals_5, (48,), (1,)) assert_size_stride(primals_6, (4, 48), (48, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf1, primals_2, buf7, 1536, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 48), (48, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(primals_4, (24, 48), (1, 24), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 48), (768, 192, 48, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_1[grid(3072)](buf3, primals_5, buf6, 3072, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 48), (48, 1), 0), reinterpret_tensor(primals_6, (48, 4), (1, 48), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor( buf3, (64, 48), (48, 1), 0), buf5, primals_6, buf6, primals_4, buf7 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class ActorNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=24, fc2_units=48): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(ActorNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
brabeem/deep-reinforcement-learning
Actor
false
12,188
[ "MIT" ]
0
aff919545a1b6d9d44f5aaaa13b9981c888e7169
https://github.com/brabeem/deep-reinforcement-learning/tree/aff919545a1b6d9d44f5aaaa13b9981c888e7169
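A small worked example of the hidden_init bound shared by Actor/ActorNew above, added for reference. Note that nn.Linear stores its weight as (out_features, in_features), so weight.data.size()[0] picks the layer's output width; the numbers below use state_size=4 and fc1_units=24 from get_init_inputs.

import numpy as np
import torch.nn as nn

fc1 = nn.Linear(4, 24)                 # state_size=4 -> fc1_units=24
fan = fc1.weight.data.size()[0]        # 24 (the output dimension of the weight)
lim = 1.0 / np.sqrt(fan)               # ~0.204
fc1.weight.data.uniform_(-lim, lim)    # same call reset_parameters() makes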
ContextPooler
from _paritybench_helpers import _mock_config import math import torch from torch import nn def get_mask(input, local_context): if not isinstance(local_context, DropoutContext): dropout = local_context mask = None else: dropout = local_context.dropout dropout *= local_context.scale mask = local_context.mask if local_context.reuse_mask else None if dropout > 0 and mask is None: mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool() if isinstance(local_context, DropoutContext): if local_context.mask is None: local_context.mask = mask return mask, dropout def gelu(x): """ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) class DropoutContext(object): def __init__(self): self.dropout = 0 self.mask = None self.scale = 1 self.reuse_mask = True class XDropout(torch.autograd.Function): """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" @staticmethod def forward(ctx, input, local_ctx): mask, dropout = get_mask(input, local_ctx) ctx.scale = 1.0 / (1 - dropout) if dropout > 0: ctx.save_for_backward(mask) return input.masked_fill(mask, 0) * ctx.scale else: return input @staticmethod def backward(ctx, grad_output): if ctx.scale > 1: mask, = ctx.saved_tensors return grad_output.masked_fill(mask, 0) * ctx.scale, None else: return grad_output, None class StableDropout(torch.nn.Module): """ Optimized dropout module for stabilizing the training Args: drop_prob (float): the dropout probabilities """ def __init__(self, drop_prob): super().__init__() self.drop_prob = drop_prob self.count = 0 self.context_stack = None def forward(self, x): """ Call the module Args: x (:obj:`torch.tensor`): The input tensor to apply dropout """ if self.training and self.drop_prob > 0: return XDropout.apply(x, self.get_context()) return x def clear_context(self): self.count = 0 self.context_stack = None def init_context(self, reuse_mask=True, scale=1): if self.context_stack is None: self.context_stack = [] self.count = 0 for c in self.context_stack: c.reuse_mask = reuse_mask c.scale = scale def get_context(self): if self.context_stack is not None: if self.count >= len(self.context_stack): self.context_stack.append(DropoutContext()) ctx = self.context_stack[self.count] ctx.dropout = self.drop_prob self.count += 1 return ctx else: return self.drop_prob class ContextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.pooler_hidden_size, config. pooler_hidden_size) self.dropout = StableDropout(config.pooler_dropout) self.config = config def forward(self, hidden_states): context_token = hidden_states[:, 0] context_token = self.dropout(context_token) pooled_output = self.dense(context_token) pooled_output = gelu(pooled_output) return pooled_output @property def output_dim(self): return self.config.hidden_size def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(pooler_hidden_size=4, pooler_dropout=0.5, hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_pow_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp2 * tmp2 tmp6 = tmp5 * tmp2 tmp7 = 0.044715 tmp8 = tmp6 * tmp7 tmp9 = tmp2 + tmp8 tmp10 = 0.7978845608028654 tmp11 = tmp9 * tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = 1.0 tmp14 = tmp12 + tmp13 tmp15 = tmp4 * tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_tanh_1[grid(64)](buf1, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1 def get_mask(input, local_context): if not isinstance(local_context, DropoutContext): dropout = local_context mask = None else: dropout = local_context.dropout dropout *= local_context.scale mask = local_context.mask if local_context.reuse_mask else None if dropout > 0 and mask is None: mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool() if isinstance(local_context, DropoutContext): if local_context.mask is None: local_context.mask = mask return mask, dropout def gelu(x): """ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). 
Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) class DropoutContext(object): def __init__(self): self.dropout = 0 self.mask = None self.scale = 1 self.reuse_mask = True class XDropout(torch.autograd.Function): """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" @staticmethod def forward(ctx, input, local_ctx): mask, dropout = get_mask(input, local_ctx) ctx.scale = 1.0 / (1 - dropout) if dropout > 0: ctx.save_for_backward(mask) return input.masked_fill(mask, 0) * ctx.scale else: return input @staticmethod def backward(ctx, grad_output): if ctx.scale > 1: mask, = ctx.saved_tensors return grad_output.masked_fill(mask, 0) * ctx.scale, None else: return grad_output, None class StableDropout(torch.nn.Module): """ Optimized dropout module for stabilizing the training Args: drop_prob (float): the dropout probabilities """ def __init__(self, drop_prob): super().__init__() self.drop_prob = drop_prob self.count = 0 self.context_stack = None def forward(self, x): """ Call the module Args: x (:obj:`torch.tensor`): The input tensor to apply dropout """ if self.training and self.drop_prob > 0: return XDropout.apply(x, self.get_context()) return x def clear_context(self): self.count = 0 self.context_stack = None def init_context(self, reuse_mask=True, scale=1): if self.context_stack is None: self.context_stack = [] self.count = 0 for c in self.context_stack: c.reuse_mask = reuse_mask c.scale = scale def get_context(self): if self.context_stack is not None: if self.count >= len(self.context_stack): self.context_stack.append(DropoutContext()) ctx = self.context_stack[self.count] ctx.dropout = self.drop_prob self.count += 1 return ctx else: return self.drop_prob class ContextPoolerNew(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.pooler_hidden_size, config. pooler_hidden_size) self.dropout = StableDropout(config.pooler_dropout) self.config = config @property def output_dim(self): return self.config.hidden_size def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
c370300679/ClinicalTransformerNER
ContextPooler
false
12,189
[ "MIT" ]
0
4a4a796775f75f6d5adc053e956ec6a0ae6fe2f3
https://github.com/c370300679/ClinicalTransformerNER/tree/4a4a796775f75f6d5adc053e956ec6a0ae6fe2f3
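The gelu() carried through this entry is the tanh approximation, and the literal 0.7978845608028654 inside triton_poi_fused_add_mul_pow_tanh_1 is sqrt(2/pi) folded in as a constant. A minimal check (not from the original entry) against PyTorch's built-in tanh-approximate GELU, available as the approximate='tanh' argument in recent releases (1.12+):

import math
import torch
import torch.nn.functional as F

def gelu_tanh(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) *
        (x + 0.044715 * torch.pow(x, 3.0))))

x = torch.randn(4, 4)
print(math.sqrt(2.0 / math.pi))                                     # 0.7978845608028654
print(torch.allclose(gelu_tanh(x), F.gelu(x, approximate='tanh')))  # True within float tolerance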
QNetwork
import torch
import torch.nn.functional as F
import torch.nn as nn


class QNetwork(nn.Module):

    def __init__(self, state_size, action_size, seed=0, fc1_units=64,
        fc2_units=32):
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3, primals_5, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( 
buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6 class QNetworkNew(nn.Module): def __init__(self, state_size, action_size, seed=0, fc1_units=64, fc2_units=32): super(QNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
bwosh/DRL_Navigation
QNetwork
false
12,190
[ "MIT" ]
0
ec33a657f826a7f3681cefe2d984690afad4abb8
https://github.com/bwosh/DRL_Navigation/tree/ec33a657f826a7f3681cefe2d984690afad4abb8
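The call() above is the style of code Inductor emits for a module like QNetwork: cuBLAS handles the matmuls while the bias-add/ReLU pairs become small pointwise Triton kernels. A minimal sketch, not part of the entry, of reproducing that path from the eager module with torch.compile (assumes PyTorch 2.x and a CUDA device):

import torch

def check_qnetwork(QNetwork, atol=1e-5):
    model = QNetwork(state_size=4, action_size=4).cuda().eval()
    compiled = torch.compile(model)             # default backend is inductor
    x = torch.rand(4, 4, 4, 4, device='cuda')   # shape from get_inputs()
    with torch.no_grad():
        return torch.allclose(model(x), compiled(x), atol=atol)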
MultiHeadAttention
import torch
from torch import nn


class MultiHeadAttention(nn.Module):

    def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim_self // num_heads
        self.scale = head_dim ** -0.5
        self.to_queries = nn.Linear(dim_self, dim_self, bias=bias)
        self.to_keys_values = nn.Linear(dim_ref, dim_self * 2, bias=bias)
        self.project = nn.Linear(dim_self, dim_self)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, y=None, mask=None):
        y = y if y is not None else x
        b, n, c = x.shape
        _, m, _d = y.shape
        queries = self.to_queries(x).reshape(b, n, self.num_heads, c //
            self.num_heads)
        keys_values = self.to_keys_values(y).reshape(b, m, 2, self.
            num_heads, c // self.num_heads)
        keys, values = keys_values[:, :, 0], keys_values[:, :, 1]
        attention = torch.einsum('bnhd,bmhd->bnmh', queries, keys) * self.scale
        if mask is not None:
            if mask.dim() == 2:
                mask = mask.unsqueeze(1)
            attention = attention.masked_fill(mask.unsqueeze(3), float('-inf'))
        attention = attention.softmax(dim=2)
        out = torch.einsum('bnmh,bmhd->bnhd', attention, values).reshape(b,
            n, c)
        out = self.project(out)
        return out, attention


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_self': 4, 'dim_ref': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp3 tmp8 = triton_helpers.maximum(tmp4, tmp7) tmp10 = tmp0 * tmp9 tmp11 = tmp10 * tmp3 tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp0 * tmp13 tmp15 = tmp14 * tmp3 tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp17 = tmp4 - tmp16 tmp18 = tmp17 * tmp3 tmp19 = tl_math.exp(tmp18) tmp20 = tmp7 - tmp16 tmp21 = tmp20 * tmp3 tmp22 = tl_math.exp(tmp21) tmp23 = tmp19 + tmp22 tmp24 = tmp11 - tmp16 tmp25 = tmp24 * tmp3 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp15 - tmp16 tmp29 = tmp28 * tmp3 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + x3, tmp16, xmask) tl.store(out_ptr1 + x3, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 16 x1 = xindex // 4 % 4 x3 = xindex // 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + (x0 + 8 * x1 + 32 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr3 + (x0 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp3 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x5, tmp10, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 8 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_2[grid(16, 16)](buf4, buf5, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf3 triton_poi_fused_clone_3[grid(16, 4)](buf1, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_clone_4[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0) del buf9 triton_poi_fused_add_5[grid(64)](buf10, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf10, buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (4, 1, 4, 4, 1), (32, 1, 8, 1, 1), 0 ), buf4, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_6, reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0) class MultiHeadAttentionNew(nn.Module): def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.0): super().__init__() self.num_heads = num_heads head_dim = dim_self // num_heads self.scale = head_dim ** -0.5 self.to_queries = nn.Linear(dim_self, dim_self, bias=bias) self.to_keys_values = nn.Linear(dim_ref, dim_self * 2, bias=bias) self.project = nn.Linear(dim_self, dim_self) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.to_queries.weight primals_3 = self.to_queries.bias primals_4 = self.to_keys_values.weight primals_5 = self.to_keys_values.bias primals_6 = self.project.weight primals_7 = self.project.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
bpiyush/CLIP_prefix_caption-video
MultiHeadAttention
false
12,192
[ "MIT" ]
0
3f6a4b8c841189e20b82fd4de127681424311599
https://github.com/bpiyush/CLIP_prefix_caption-video/tree/3f6a4b8c841189e20b82fd4de127681424311599
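With dim_self=4 and num_heads=4 from get_init_inputs, the head dimension is 1 and self.scale folds to the literal 1.0 seen in triton_poi_fused__softmax_0/1 above. The einsum that produces the attention scores is equivalent to a permute-and-matmul; the following is a small added sketch on random tensors illustrating that equivalence:

import torch

b, n, m, h, d = 4, 4, 4, 4, 1          # batch, query len, key len, heads, head dim
Q = torch.randn(b, n, h, d)
K = torch.randn(b, m, h, d)
via_einsum = torch.einsum('bnhd,bmhd->bnmh', Q, K)
via_matmul = (Q.permute(0, 2, 1, 3) @ K.permute(0, 2, 3, 1)).permute(0, 2, 3, 1)
print(torch.allclose(via_einsum, via_matmul, atol=1e-6))   # True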
TransformerBlock
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init class TransformerBlock(nn.Module): def __init__(self, input_size, d_k=16, d_v=16, n_heads=8, is_layer_norm =False, attn_dropout=0.1): super(TransformerBlock, self).__init__() self.n_heads = n_heads self.d_k = d_k if d_k is not None else input_size self.d_v = d_v if d_v is not None else input_size self.is_layer_norm = is_layer_norm if is_layer_norm: self.layer_morm = nn.LayerNorm(normalized_shape=input_size) self.W_q = nn.Parameter(torch.Tensor(input_size, n_heads * d_k)) self.W_k = nn.Parameter(torch.Tensor(input_size, n_heads * d_k)) self.W_v = nn.Parameter(torch.Tensor(input_size, n_heads * d_v)) self.W_o = nn.Parameter(torch.Tensor(d_v * n_heads, input_size)) self.linear1 = nn.Linear(input_size, input_size) self.linear2 = nn.Linear(input_size, input_size) self.dropout = nn.Dropout(attn_dropout) self.__init_weights__() None def __init_weights__(self): init.xavier_normal_(self.W_q) init.xavier_normal_(self.W_k) init.xavier_normal_(self.W_v) init.xavier_normal_(self.W_o) init.xavier_normal_(self.linear1.weight) init.xavier_normal_(self.linear2.weight) def FFN(self, X): output = self.linear2(F.relu(self.linear1(X))) output = self.dropout(output) return output def scaled_dot_product_attention(self, Q, K, V, episilon=1e-06): """ :param Q: (*, max_q_words, n_heads, input_size) :param K: (*, max_k_words, n_heads, input_size) :param V: (*, max_v_words, n_heads, input_size) :param episilon: :return: """ temperature = self.d_k ** 0.5 Q_K = torch.einsum('bqd,bkd->bqk', Q, K) / (temperature + episilon) Q_K_score = F.softmax(Q_K, dim=-1) Q_K_score = self.dropout(Q_K_score) V_att = Q_K_score.bmm(V) return V_att def multi_head_attention(self, Q, K, V): bsz, q_len, _ = Q.size() bsz, k_len, _ = K.size() bsz, v_len, _ = V.size() Q_ = Q.matmul(self.W_q).view(bsz, q_len, self.n_heads, self.d_k) K_ = K.matmul(self.W_k).view(bsz, k_len, self.n_heads, self.d_k) V_ = V.matmul(self.W_v).view(bsz, v_len, self.n_heads, self.d_v) Q_ = Q_.permute(0, 2, 1, 3).contiguous().view(bsz * self.n_heads, q_len, self.d_k) K_ = K_.permute(0, 2, 1, 3).contiguous().view(bsz * self.n_heads, q_len, self.d_k) V_ = V_.permute(0, 2, 1, 3).contiguous().view(bsz * self.n_heads, q_len, self.d_v) V_att = self.scaled_dot_product_attention(Q_, K_, V_) V_att = V_att.view(bsz, self.n_heads, q_len, self.d_v) V_att = V_att.permute(0, 2, 1, 3).contiguous().view(bsz, q_len, self.n_heads * self.d_v) output = self.dropout(V_att.matmul(self.W_o)) return output def forward(self, Q, K, V): """ :param Q: (batch_size, max_q_words, input_size) :param K: (batch_size, max_k_words, input_size) :param V: (batch_size, max_v_words, input_size) :return: output: (batch_size, max_q_words, input_size) same size as Q """ V_att = self.multi_head_attention(Q, K, V) if self.is_layer_norm: X = self.layer_morm(Q + V_att) output = self.layer_morm(self.FFN(X) + X) else: X = Q + V_att output = self.FFN(X) + X return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 % 8 x3 = xindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 128 * x1 + 512 * x3), None) tl.store(out_ptr0 + x4, tmp0, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.24999993750001562 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (16 * (x1 % 4) + 64 * (x0 // 16) + 512 * (x1 // 4) + x0 % 16), None) tl.store(out_ptr0 + x2, tmp0, None) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, 128), (128, 1)) assert_size_stride(primals_6, (4, 128), (128, 1)) assert_size_stride(primals_7, (128, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, out=buf0) del primals_4 buf1 = empty_strided_cuda((16, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_5, out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), primals_6, out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 8, 4, 16), (512, 64, 16, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(2048)](buf0, buf3, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 8, 4, 16), (512, 64, 16, 1), 0) del buf0 triton_poi_fused_clone_0[grid(2048)](buf1, buf4, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (32, 4, 16), (64, 16, 1 ), 0), reinterpret_tensor(buf4, (32, 16, 4), (64, 1, 16), 0), out=buf5) buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(512)](buf5, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(512)](buf6, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 8, 4, 16), (512, 64, 16, 1), 0) del buf1 triton_poi_fused_clone_0[grid(2048)](buf2, buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (32, 4, 16), (64, 16, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 16), (64, 16, 1), 0), out=buf9) buf10 = empty_strided_cuda((16, 128), (128, 1), torch.float32) triton_poi_fused_view_3[grid(2048)](buf9, buf10, 2048, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, primals_7, out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_add_4[grid(64)](buf12, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0) del buf13 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf14, primals_9, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0) del buf15 triton_poi_fused_add_6[grid(64)](buf16, primals_11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 return buf16, buf7, reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf14, (16, 4), (4, 1), 0 ), primals_10, buf17, primals_8, reinterpret_tensor(buf10, (128, 16 ), (1, 128), 0), reinterpret_tensor(primals_7, (4, 128), (1, 4), 0 ), reinterpret_tensor(buf8, (32, 16, 4), (64, 1, 16), 0 ), reinterpret_tensor(buf3, (32, 16, 4), (64, 1, 16), 0 ), reinterpret_tensor(buf4, (32, 4, 16), (64, 16, 1), 0 ), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0) class TransformerBlockNew(nn.Module): def __init__(self, input_size, d_k=16, d_v=16, n_heads=8, is_layer_norm =False, attn_dropout=0.1): super(TransformerBlockNew, self).__init__() self.n_heads = n_heads self.d_k = d_k if d_k is not None else input_size self.d_v = d_v if d_v is not None else input_size self.is_layer_norm = is_layer_norm if is_layer_norm: self.layer_morm = nn.LayerNorm(normalized_shape=input_size) self.W_q = nn.Parameter(torch.Tensor(input_size, n_heads * d_k)) self.W_k = nn.Parameter(torch.Tensor(input_size, n_heads * d_k)) self.W_v = nn.Parameter(torch.Tensor(input_size, n_heads * 
d_v)) self.W_o = nn.Parameter(torch.Tensor(d_v * n_heads, input_size)) self.linear1 = nn.Linear(input_size, input_size) self.linear2 = nn.Linear(input_size, input_size) self.dropout = nn.Dropout(attn_dropout) self.__init_weights__() None def __init_weights__(self): init.xavier_normal_(self.W_q) init.xavier_normal_(self.W_k) init.xavier_normal_(self.W_v) init.xavier_normal_(self.W_o) init.xavier_normal_(self.linear1.weight) init.xavier_normal_(self.linear2.weight) def FFN(self, X): output = self.linear2(F.relu(self.linear1(X))) output = self.dropout(output) return output def scaled_dot_product_attention(self, Q, K, V, episilon=1e-06): """ :param Q: (*, max_q_words, n_heads, input_size) :param K: (*, max_k_words, n_heads, input_size) :param V: (*, max_v_words, n_heads, input_size) :param episilon: :return: """ temperature = self.d_k ** 0.5 Q_K = torch.einsum('bqd,bkd->bqk', Q, K) / (temperature + episilon) Q_K_score = F.softmax(Q_K, dim=-1) Q_K_score = self.dropout(Q_K_score) V_att = Q_K_score.bmm(V) return V_att def multi_head_attention(self, Q, K, V): bsz, q_len, _ = Q.size() bsz, k_len, _ = K.size() bsz, v_len, _ = V.size() Q_ = Q.matmul(self.W_q).view(bsz, q_len, self.n_heads, self.d_k) K_ = K.matmul(self.W_k).view(bsz, k_len, self.n_heads, self.d_k) V_ = V.matmul(self.W_v).view(bsz, v_len, self.n_heads, self.d_v) Q_ = Q_.permute(0, 2, 1, 3).contiguous().view(bsz * self.n_heads, q_len, self.d_k) K_ = K_.permute(0, 2, 1, 3).contiguous().view(bsz * self.n_heads, q_len, self.d_k) V_ = V_.permute(0, 2, 1, 3).contiguous().view(bsz * self.n_heads, q_len, self.d_v) V_att = self.scaled_dot_product_attention(Q_, K_, V_) V_att = V_att.view(bsz, self.n_heads, q_len, self.d_v) V_att = V_att.permute(0, 2, 1, 3).contiguous().view(bsz, q_len, self.n_heads * self.d_v) output = self.dropout(V_att.matmul(self.W_o)) return output def forward(self, input_0, input_1, input_2): primals_4 = self.W_q primals_5 = self.W_k primals_6 = self.W_v primals_7 = self.W_o primals_8 = self.linear1.weight primals_9 = self.linear1.bias primals_10 = self.linear2.weight primals_11 = self.linear2.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
bopopescu/covid-19-visualization
TransformerBlock
false
12,193
[ "MIT" ]
0
8a9325b52f007dd5e3ee5bbd323b71bbf19b9640
https://github.com/bopopescu/covid-19-visualization/tree/8a9325b52f007dd5e3ee5bbd323b71bbf19b9640
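The literal 0.24999993750001562 in triton_poi_fused__softmax_1 above is not arbitrary: it is 1 / (d_k ** 0.5 + episilon) from scaled_dot_product_attention with the defaults d_k=16 and episilon=1e-06, pre-folded into the softmax kernel as a constant. A two-line check added for reference:

d_k, eps = 16, 1e-06
print(1.0 / (d_k ** 0.5 + eps))   # ~0.2499999375..., the constant baked into the fused kernel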
EltwiseProdScoring
import torch
import torch.nn as nn


class EltwiseProdScoring(nn.Module):
    """
    Linearly mapping h and v to the same dimension, and do a elementwise
    multiplication and a linear scoring
    """

    def __init__(self, h_dim, a_dim, dot_dim=256):
        """Initialize layer."""
        super(EltwiseProdScoring, self).__init__()
        self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True)
        self.linear_in_a = nn.Linear(a_dim, dot_dim, bias=True)
        self.linear_out = nn.Linear(dot_dim, 1, bias=True)

    def forward(self, h, all_u_t, mask=None):
        """Propagate h through the network.

        h: batch x h_dim
        all_u_t: batch x a_num x a_dim
        """
        target = self.linear_in_h(h).unsqueeze(1)
        context = self.linear_in_a(all_u_t)
        eltprod = torch.mul(target, context)
        logits = self.linear_out(eltprod).squeeze(2)
        return logits


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'h_dim': 4, 'a_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4096 x2 = xindex // 16384 x3 = xindex % 16384 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 4), (4, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (1, 256), (256, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4, 256), (16384, 4096, 1024, 256, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(65536)](buf0, buf1, buf2, 65536, XBLOCK =256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf4) del primals_8 return reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), primals_7 class EltwiseProdScoringNew(nn.Module): """ Linearly mapping h and v to the same dimension, and do a elementwise multiplication and a linear scoring """ def __init__(self, h_dim, a_dim, dot_dim=256): """Initialize layer.""" super(EltwiseProdScoringNew, self).__init__() self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True) self.linear_in_a = nn.Linear(a_dim, dot_dim, bias=True) self.linear_out = nn.Linear(dot_dim, 1, bias=True) def forward(self, input_0, input_1): primals_1 = self.linear_in_h.weight primals_2 = self.linear_in_h.bias primals_4 = self.linear_in_a.weight primals_5 = self.linear_in_a.bias primals_7 = self.linear_out.weight primals_8 = self.linear_out.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
cacosandon/speaker_follower_with_objects
EltwiseProdScoring
false
12,194
[ "BSD-2-Clause", "MIT" ]
0
f3d454fdbd1c8129887cf4ecc4743d231c7b9555
https://github.com/cacosandon/speaker_follower_with_objects/tree/f3d454fdbd1c8129887cf4ecc4743d231c7b9555
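A minimal usage sketch for EltwiseProdScoring above, added for reference and using the shapes its forward docstring describes (h: batch x h_dim, all_u_t: batch x a_num x a_dim) rather than the 4-D tensors from get_inputs(); it assumes the EltwiseProdScoring class from this entry is in scope, and the sizes are illustrative only:

import torch

batch, h_dim, a_num, a_dim = 2, 4, 5, 4
scorer = EltwiseProdScoring(h_dim, a_dim)
h = torch.rand(batch, h_dim)
all_u_t = torch.rand(batch, a_num, a_dim)
logits = scorer(h, all_u_t)                 # target broadcasts over the a_num axis
print(logits.shape)                         # torch.Size([2, 5]) -- one logit per candidate action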
EuclideanMean
import torch
from torch import Tensor
import torch.utils.data.dataloader
from torch import nn
import torch.nn


class EuclideanMean(nn.Module):
    """Implement a EuclideanMean object."""

    def forward(self, data: 'Tensor') ->Tensor:
        """Performs a forward pass through the network.

        Parameters
        ----------
        data : torch.Tensor
            The input data, as a float tensor

        Returns
        -------
        torch.Tensor
            The encoded output, as a float tensor

        """
        return data.mean(0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class EuclideanMeanNew(nn.Module): """Implement a EuclideanMean object.""" def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
chen-yuxuan/flair
EuclideanMean
false
12,195
[ "MIT" ]
0
480d2c9afd66ab8d3bf40a676917e84dba3c4cee
https://github.com/chen-yuxuan/flair/tree/480d2c9afd66ab8d3bf40a676917e84dba3c4cee
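Note that triton_poi_fused_mean_0 above unrolls the reduction over the leading dimension (four loads at offsets 0, 64, 128 and 192 followed by a divide by 4.0), i.e. the generated kernel is specialised to the traced (4, 4, 4, 4) input of get_inputs(). A small added check that this matches data.mean(0) for that shape:

import torch

data = torch.rand(4, 4, 4, 4)
manual = (data[0] + data[1] + data[2] + data[3]) / 4.0   # what the fused kernel computes
print(torch.allclose(manual, data.mean(0)))              # True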
BertSelfAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): attention_scores = torch.matmul(query, key.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attn_value = torch.matmul(attention_probs, value) attn_value = attn_value.transpose(1, 2).contiguous() bs, seq_len = attn_value.shape[:2] attn_value = attn_value.view(bs, seq_len, -1) return attn_value def forward(self, hidden_states, attention_mask): key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(num_attention_heads=4, hidden_size= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x4, xmask) tmp3 = 
tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf2 triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), 
(16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf1, primals_5, buf10, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf11 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): attention_scores = torch.matmul(query, key.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attn_value = torch.matmul(attention_probs, value) attn_value = attn_value.transpose(1, 2).contiguous() bs, seq_len = attn_value.shape[:2] attn_value = attn_value.view(bs, seq_len, -1) return attn_value def forward(self, input_0, input_1): primals_2 = self.query.weight primals_3 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
brendon-boldt/minbert-assignment
BertSelfAttention
false
12196
[ "Apache-2.0" ]
0
0b562d791d34a40fd3c0383a0a32b4eeb2171cb5
https://github.com/brendon-boldt/minbert-assignment/tree/0b562d791d34a40fd3c0383a0a32b4eeb2171cb5
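The BertSelfAttentionNew wrapper above feeds the module's own weights plus the two runtime inputs through the Inductor-generated call(). Below is a minimal smoke-test sketch, not part of the dataset record: the config object is hypothetical (a SimpleNamespace with hidden_size=4 and num_attention_heads=4, which appears consistent with the (4, 4) weight and (4, 4, 4) input shapes asserted inside call()), and a CUDA device is assumed since the kernels are compiled for one.

import torch
from types import SimpleNamespace

# Hypothetical config; hidden_size=4 with 4 heads gives head size 1,
# matching the 1.0 scaling constant baked into triton_poi_fused_0.
config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                         attention_probs_dropout_prob=0.1)
model = BertSelfAttentionNew(config).cuda().eval()

hidden_states = torch.rand(4, 4, 4, device="cuda")    # (batch, seq_len, hidden)
attention_mask = torch.zeros(4, 4, 4, device="cuda")  # additive mask, all zeros
with torch.no_grad():
    out = model(hidden_states, attention_mask)
print(out.shape)  # expected: torch.Size([4, 4, 4])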
PyTorchFeedForward
import torch import torch.nn import torch.autograd import torch.nn as nn import torch.nn.functional as F import torch.optim import torch.cuda class PyTorchFeedForward(nn.Module): def __init__(self, depth, width, input_size, output_size): super(PyTorchFeedForward, self).__init__() self.linears = [nn.Linear(input_size, width)] for i in range(depth - 1): self.linears.append(nn.Linear(width, width)) self.linears.append(nn.Linear(width, output_size)) for i, child in enumerate(self.linears): self.add_module('child%d' % i, child) def forward(self, x): y = F.dropout(F.relu(self.linears[0](x)), self.training) for layer in self.linears[1:-1]: y = F.relu(layer(y)) y = F.dropout(y, self.training) y = F.log_softmax(self.linears[-1](y)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'depth': 1, 'width': 4, 'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn import torch.autograd import torch.nn as nn import torch.optim import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5 class PyTorchFeedForwardNew(nn.Module): def __init__(self, depth, width, input_size, output_size): super(PyTorchFeedForwardNew, self).__init__() self.linears = [nn.Linear(input_size, width)] for i in range(depth - 1): self.linears.append(nn.Linear(width, width)) self.linears.append(nn.Linear(width, output_size)) for i, child in enumerate(self.linears): self.add_module('child%d' % i, child) def forward(self, input_0): primals_1 = self.child0.weight primals_2 = self.child0.bias primals_4 = self.child1.weight primals_5 = self.child1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ccoulombe/thinc
PyTorchFeedForward
false
12197
[ "MIT" ]
0
8d891b61ddef3ca00266ca0ec7c47e2d063a3a83
https://github.com/ccoulombe/thinc/tree/8d891b61ddef3ca00266ca0ec7c47e2d063a3a83
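This record contains both the eager PyTorchFeedForward and the compiled PyTorchFeedForwardNew, so a direct comparison is possible. The sketch below is illustrative only: it reuses the record's get_init_inputs()/get_inputs() values (depth=1, width=4, a [4, 4, 4, 4] input), copies the weights across, and assumes a CUDA device. Both modules are put in eval() mode because the original forward passes self.training positionally as the dropout probability, so eval (probability 0) keeps the run deterministic; the outputs are then expected to agree up to floating-point tolerance, as a sanity check rather than a guarantee.

import torch

eager = PyTorchFeedForward(depth=1, width=4, input_size=4, output_size=4).cuda().eval()
compiled = PyTorchFeedForwardNew(depth=1, width=4, input_size=4, output_size=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # child0/child1 weights shared

x = torch.rand(4, 4, 4, 4, device="cuda")  # same shape as get_inputs()
with torch.no_grad():
    ref = eager(x)      # relu -> (inert) dropout -> log_softmax over dim 1
    out = compiled(x)   # mm/relu kernel, addmm, fused log_softmax kernels
print(torch.allclose(ref, out, atol=1e-5))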
Net
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.a1 = nn.Conv2d(5, 16, kernel_size=3, padding=1) self.a2 = nn.Conv2d(16, 16, kernel_size=3, padding=1) self.a3 = nn.Conv2d(16, 32, kernel_size=3, stride=2) self.b1 = nn.Conv2d(32, 32, kernel_size=3, padding=1) self.b2 = nn.Conv2d(32, 32, kernel_size=3, padding=1) self.b3 = nn.Conv2d(32, 64, kernel_size=3, stride=2) self.c1 = nn.Conv2d(64, 64, kernel_size=2, padding=1) self.c2 = nn.Conv2d(64, 64, kernel_size=2, padding=1) self.c3 = nn.Conv2d(64, 128, kernel_size=2, stride=2) self.d1 = nn.Conv2d(128, 128, kernel_size=1) self.d2 = nn.Conv2d(128, 128, kernel_size=1) self.d3 = nn.Conv2d(128, 128, kernel_size=1) self.last = nn.Linear(128, 1) def forward(self, x): x = F.relu(self.a1(x)) x = F.relu(self.a2(x)) x = F.relu(self.a3(x)) x = F.relu(self.b1(x)) x = F.relu(self.b2(x)) x = F.relu(self.b3(x)) x = F.relu(self.c1(x)) x = F.relu(self.c2(x)) x = F.relu(self.c3(x)) x = F.relu(self.d1(x)) x = F.relu(self.d2(x)) x = F.relu(self.d3(x)) x = x.view(-1, 128) x = self.last(x) return F.tanh(x) def get_inputs(): return [torch.rand([4, 5, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 80 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 5 y1 = yindex // 5 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 5 * x2 + 45 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 20 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 5 y1 = yindex // 5 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 5 * x2 + 20480 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) 
@triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 73984 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 128 y1 = yindex // 128 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 8192 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 128 * x2 + 8192 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_tanh_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (16, 5, 3, 3), (45, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 5, 64, 64), (20480, 4096, 64, 1)) assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_17, (64,), 
(1,)) assert_size_stride(primals_18, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (1, 128), (128, 1)) assert_size_stride(primals_27, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 5, 3, 3), (45, 1, 15, 5), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(80, 9)](primals_1, buf0, 80, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 5, 64, 64), (20480, 1, 320, 5), torch .float32) triton_poi_fused_1[grid(20, 4096)](primals_3, buf1, 20, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch. float32) triton_poi_fused_2[grid(256, 9)](primals_4, buf2, 256, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch. float32) triton_poi_fused_3[grid(512, 9)](primals_6, buf3, 512, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_4[grid(1024, 9)](primals_8, buf4, 1024, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_4[grid(1024, 9)](primals_10, buf5, 1024, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_5[grid(2048, 9)](primals_12, buf6, 2048, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((64, 64, 2, 2), (256, 1, 128, 64), torch. float32) triton_poi_fused_6[grid(4096, 4)](primals_14, buf7, 4096, 4, XBLOCK =4, YBLOCK=256, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((64, 64, 2, 2), (256, 1, 128, 64), torch. 
float32) triton_poi_fused_6[grid(4096, 4)](primals_16, buf8, 4096, 4, XBLOCK =4, YBLOCK=256, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch .float32) triton_poi_fused_7[grid(8192, 4)](primals_18, buf9, 8192, 4, XBLOCK =4, YBLOCK=256, num_warps=4, num_stages=1) del primals_18 buf10 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 64, 64), (65536, 1, 1024, 16)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_8[grid(262144)](buf11, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf12 = extern_kernels.convolution(buf11, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 16, 64, 64), (65536, 1, 1024, 16)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_8[grid(262144)](buf13, primals_5, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf14 = extern_kernels.convolution(buf13, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 32, 31, 31), (30752, 1, 992, 32)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_9[grid(123008)](buf15, primals_7, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf16 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 32, 31, 31), (30752, 1, 992, 32)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_9[grid(123008)](buf17, primals_9, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf18 = extern_kernels.convolution(buf17, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 32, 31, 31), (30752, 1, 992, 32)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_9[grid(123008)](buf19, primals_11, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf20 = extern_kernels.convolution(buf19, buf6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 15, 15), (14400, 1, 960, 64)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_10[grid(57600)](buf21, primals_13, 57600, XBLOCK=512, num_warps=4, num_stages=1) del primals_13 buf22 = extern_kernels.convolution(buf21, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(65536)](buf23, primals_15, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 17, 17), (18496, 1, 1088, 64)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_12[grid(73984)](buf25, primals_17, 73984, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf26 = extern_kernels.convolution(buf25, buf9, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 8, 8), 
(8192, 1, 1024, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_13[grid(32768)](buf27, primals_19, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf28 = extern_kernels.convolution(buf27, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_13[grid(32768)](buf29, primals_21, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 buf30 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_13[grid(32768)](buf31, primals_23, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_23 buf32 = extern_kernels.convolution(buf31, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf33 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. float32) buf36 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_14[grid(512, 64)]( buf32, primals_25, buf33, buf36, 512, 64, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf32 del primals_25 buf34 = empty_strided_cuda((256, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf33, (256, 128), (128, 1), 0 ), reinterpret_tensor(primals_26, (128, 1), (1, 128), 0), out=buf34 ) buf35 = buf34 del buf34 triton_poi_fused_tanh_15[grid(256)](buf35, primals_27, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_27 return (buf35, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, primals_20, primals_22, primals_24, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, reinterpret_tensor(buf33, (256, 128), (128, 1), 0), buf35, primals_26, buf36) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.a1 = nn.Conv2d(5, 16, kernel_size=3, padding=1) self.a2 = nn.Conv2d(16, 16, kernel_size=3, padding=1) self.a3 = nn.Conv2d(16, 32, kernel_size=3, stride=2) self.b1 = nn.Conv2d(32, 32, kernel_size=3, padding=1) self.b2 = nn.Conv2d(32, 32, kernel_size=3, padding=1) self.b3 = nn.Conv2d(32, 64, kernel_size=3, stride=2) self.c1 = nn.Conv2d(64, 64, kernel_size=2, padding=1) self.c2 = nn.Conv2d(64, 64, kernel_size=2, padding=1) self.c3 = nn.Conv2d(64, 128, kernel_size=2, stride=2) self.d1 = nn.Conv2d(128, 128, kernel_size=1) self.d2 = nn.Conv2d(128, 128, kernel_size=1) self.d3 = nn.Conv2d(128, 128, kernel_size=1) self.last = nn.Linear(128, 1) def forward(self, input_0): primals_1 = self.a1.weight primals_2 = self.a1.bias primals_4 = self.a2.weight primals_5 = self.a2.bias primals_6 = self.a3.weight primals_7 = self.a3.bias primals_8 = self.b1.weight primals_9 = self.b1.bias primals_10 = self.b2.weight primals_11 = self.b2.bias primals_12 = self.b3.weight primals_13 = self.b3.bias primals_14 = self.c1.weight primals_15 = self.c1.bias primals_16 = self.c2.weight primals_17 = self.c2.bias primals_18 = self.c3.weight primals_19 = self.c3.bias primals_20 = self.d1.weight primals_21 = self.d1.bias primals_22 = self.d2.weight primals_23 = self.d2.bias primals_24 = self.d3.weight primals_25 = self.d3.bias primals_26 = self.last.weight primals_27 = 
self.last.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
blockide/Chess-ML
Net
false
12198
[ "MIT" ]
0
3b1572f715ed710f5ce240c76bb79ae8f186f32a
https://github.com/blockide/Chess-ML/tree/3b1572f715ed710f5ce240c76bb79ae8f186f32a
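The compiled Net mostly re-lays the convolution weights into channels-last-style strides (triton_poi_fused_0 through _7) and fuses each bias add with its ReLU, while the convolutions themselves stay in extern_kernels.convolution. A short smoke test of the compiled wrapper, reusing the (4, 5, 64, 64) shape from get_inputs() and assuming a CUDA device, might look like this; it is illustrative, not part of the record.

import torch

model = NetNew().cuda().eval()
x = torch.rand(4, 5, 64, 64, device="cuda")  # same shape as get_inputs()
with torch.no_grad():
    out = model(x)
# The 4x128x8x8 feature map is flattened to 256 rows before the final Linear(128, 1),
# so the output is (256, 1) with values squashed into (-1, 1) by tanh.
print(out.shape, out.min().item(), out.max().item())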
FC_Q
import torch import torch.nn as nn import torch.nn.functional as F class FC_Q(nn.Module): def __init__(self, state_dim, num_actions): super(FC_Q, self).__init__() self.q1 = nn.Linear(state_dim, 256) self.q2 = nn.Linear(256, 256) self.q3 = nn.Linear(256, num_actions) self.i1 = nn.Linear(state_dim, 256) self.i2 = nn.Linear(256, 256) self.i3 = nn.Linear(256, num_actions) def forward(self, state): q = F.relu(self.q1(state)) q = F.relu(self.q2(q)) i = F.relu(self.i1(state)) i = F.relu(self.i2(i)) i = F.relu(self.i3(i)) return self.q3(q) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 4), (4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256), (256, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 256), (256, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 256), (256, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3, primals_5, buf5, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_12, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf4) del primals_13 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), primals_12, buf5, primals_4, buf6 class FC_QNew(nn.Module): def __init__(self, state_dim, num_actions): super(FC_QNew, self).__init__() 
self.q1 = nn.Linear(state_dim, 256) self.q2 = nn.Linear(256, 256) self.q3 = nn.Linear(256, num_actions) self.i1 = nn.Linear(state_dim, 256) self.i2 = nn.Linear(256, 256) self.i3 = nn.Linear(256, num_actions) def forward(self, input_0): primals_1 = self.q1.weight primals_2 = self.q1.bias primals_4 = self.q2.weight primals_5 = self.q2.bias primals_10 = self.q3.weight primals_11 = self.q3.bias primals_6 = self.i1.weight primals_7 = self.i1.bias primals_8 = self.i2.weight primals_9 = self.i2.bias primals_12 = self.i3.weight primals_13 = self.i3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
cedesu/BCQ
FC_Q
false
12199
[ "MIT" ]
0
424548510349a85c31809431494dcc6f64b611ba
https://github.com/cedesu/BCQ/tree/424548510349a85c31809431494dcc6f64b611ba
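Because FC_Q.forward only returns self.q3(q), most of the i-branch is dead code, and the compiled call() above reflects that: only two mm-plus-ReLU stages and a single addmm remain, with several of the parameters passed in appearing solely in shape assertions. A minimal smoke test of the compiled wrapper, reusing the state_dim=4 / num_actions=4 values from get_init_inputs() and assuming a CUDA device, is sketched below; it is illustrative and not part of the dataset record.

import torch

model = FC_QNew(state_dim=4, num_actions=4).cuda().eval()
state = torch.rand(4, 4, 4, 4, device="cuda")  # same shape as get_inputs()
with torch.no_grad():
    q_values = model(state)
print(q_values.shape)  # expected: torch.Size([4, 4, 4, 4])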
VAE
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.unsqueeze(-1).unsqueeze(-1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = F.sigmoid(self.deconv4(x)) return reconstruction class Encoder(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(Encoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(x.size(0), -1) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma class VAE(nn.Module): """ Variational Autoencoder """ def __init__(self, img_channels, latent_size): super(VAE, self).__init__() self.encoder = Encoder(img_channels, latent_size) self.decoder = Decoder(img_channels, latent_size) def forward(self, x): mu, logsigma = self.encoder(x) sigma = logsigma.exp() eps = torch.randn_like(sigma) z = eps.mul(sigma).add_(mu) recon_x = self.decoder(z) return recon_x, mu, logsigma def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) @triton.jit def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = 
tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (1024, 4), (4, 1)) assert_size_stride(primals_15, 
(1024,), (1,)) assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1)) assert_size_stride(primals_21, (32,), (1,)) assert_size_stride(primals_22, (32, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_23, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch .float32) triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_5[grid(131072, 25)](primals_16, buf5, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_16 buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_6[grid(8192, 25)](primals_18, buf6, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_18 buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch .float32) triton_poi_fused_7[grid(2048, 36)](primals_20, buf7, 2048, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_20 buf8 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32 ) triton_poi_fused_8[grid(128, 36)](primals_22, buf8, 128, 36, XBLOCK =32, YBLOCK=32, num_warps=4, num_stages=1) del primals_22 buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_9[grid(123008)](buf10, primals_2, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_10[grid(50176)](buf12, primals_5, 50176, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128)) buf14 = buf13 del buf13 triton_poi_fused_convolution_relu_11[grid(18432)](buf14, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2), padding=(0, 
0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256)) buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. float32) buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_12[grid(1024, 4)]( buf15, primals_9, buf16, buf33, 1024, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf17) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18) del primals_13 buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_exp_mul_13[grid(16)](buf20, buf18, buf17, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1) buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0) del buf15 extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024), (1, 4), 0), out=buf22) buf23 = buf22 del buf22 buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_14[grid(4096)](buf23, primals_15, buf32, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding= (0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_15[grid(12800)](buf25, primals_17, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_16[grid(43264)](buf27, primals_19, 43264, XBLOCK=512, num_warps=4, num_stages=1) del primals_19 buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_17[grid(115200)](buf29, primals_21, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 64, 64), (16384, 1, 256, 4)) buf31 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_sigmoid_18[grid(16, 4096)](buf30, primals_23, buf31, 16, 4096, XBLOCK=512, YBLOCK=1, num_warps=4, num_stages=1) del buf30 del primals_23 return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), 
buf25, buf27, buf29, buf31, buf32, primals_14, primals_12, primals_10, buf33) class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.unsqueeze(-1).unsqueeze(-1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = F.sigmoid(self.deconv4(x)) return reconstruction class Encoder(nn.Module): """ VAE encoder """ def __init__(self, img_channels, latent_size): super(Encoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(x.size(0), -1) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma class VAENew(nn.Module): """ Variational Autoencoder """ def __init__(self, img_channels, latent_size): super(VAENew, self).__init__() self.encoder = Encoder(img_channels, latent_size) self.decoder = Decoder(img_channels, latent_size) def forward(self, input_0): primals_1 = self.encoder.conv1.weight primals_2 = self.encoder.conv1.bias primals_4 = self.encoder.conv2.weight primals_5 = self.encoder.conv2.bias primals_6 = self.encoder.conv3.weight primals_7 = self.encoder.conv3.bias primals_8 = self.encoder.conv4.weight primals_9 = self.encoder.conv4.bias primals_10 = self.encoder.fc_mu.weight primals_11 = self.encoder.fc_mu.bias primals_12 = self.encoder.fc_logsigma.weight primals_13 = self.encoder.fc_logsigma.bias primals_14 = self.decoder.fc1.weight primals_15 = self.decoder.fc1.bias primals_16 = self.decoder.deconv1.weight primals_17 = self.decoder.deconv1.bias primals_18 = self.decoder.deconv2.weight primals_19 = self.decoder.deconv2.bias primals_20 = self.decoder.deconv3.weight primals_21 = self.decoder.deconv3.bias primals_22 = self.decoder.deconv4.weight primals_23 = self.decoder.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0], output[1], output[2]
benedictquartey/softgym_wm
VAE
false
12200
[ "BSD-3-Clause" ]
0
0aef75fed207b11029f6052c656a679c105b4677
https://github.com/benedictquartey/softgym_wm/tree/0aef75fed207b11029f6052c656a679c105b4677
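A minimal eager-mode sketch of how the Encoder/Decoder pair recorded above composes, assuming both classes from this record are importable; the 3-channel, 64x64, latent-32 sizes are illustrative assumptions, and the reparameterisation step is the standard VAE formulation rather than code taken from this record.

import torch

# assumes Encoder and Decoder from this record are in scope
enc = Encoder(img_channels=3, latent_size=32)
dec = Decoder(img_channels=3, latent_size=32)

x = torch.rand(4, 3, 64, 64)                     # four 64x64 RGB images (assumed size)
mu, logsigma = enc(x)                            # each of shape (4, 32)
z = mu + logsigma.exp() * torch.randn_like(mu)   # standard reparameterisation (assumption)
recon = dec(z)                                   # (4, 3, 64, 64), sigmoid-activated
print(mu.shape, logsigma.shape, recon.shape)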
GramMatrix
import torch from torch import nn class GramMatrix(nn.Module): def forward(self, x): b, c, h, w = x.shape F = x.view(-1, c, b * w) G = torch.bmm(F, F.transpose(1, 2)) / (h * w) return G def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0625 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) del arg0_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, class GramMatrixNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
choiking10/Image-Style-Transfer
GramMatrix
false
12201
[ "MIT" ]
0
cc4a6c22975e16343a0fecfdfd3e707c34905e93
https://github.com/choiking10/Image-Style-Transfer/tree/cc4a6c22975e16343a0fecfdfd3e707c34905e93
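A short usage sketch for this record, assuming GramMatrix and GramMatrixNew above are importable: it checks the eager Gram matrix is symmetric per batch entry and, when a CUDA device with the inductor runtime is available (an assumption), that the Triton-compiled module matches the eager one on the (4, 4, 4, 4) input from get_inputs().

import torch

x = torch.rand(4, 4, 4, 4)
ref = GramMatrix()(x)                               # eager path, shape (4, 4, 4)
print(torch.allclose(ref, ref.transpose(1, 2)))     # each G[i] = F @ F^T is symmetric
if torch.cuda.is_available():
    out = GramMatrixNew()(x.cuda()).cpu()           # Triton-compiled path
    print(torch.allclose(ref, out, atol=1e-6))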
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
import torch import torch.nn import torch.onnx class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(torch .nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency , self).__init__() self.fc1 = torch.nn.Linear(input_size, hidden_size) self.fc2 = torch.nn.Linear(input_size, hidden_size) self.softmax1 = torch.nn.Softmax(dim=1) self.softmax2 = torch.nn.Softmax(dim=1) def forward(self, input1, input2): model_input = input1 + input2 out1 = self.fc1(model_input) out2 = self.fc2(model_input) out1 = self.softmax1(out1) out2 = self.softmax2(out2) return out1, out2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (64, 4), 
( 4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(256)](buf2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 return buf4, buf6, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf4, buf6 class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew( torch.nn.Module): def __init__(self, input_size, hidden_size, num_classes): super( NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew , self).__init__() self.fc1 = torch.nn.Linear(input_size, hidden_size) self.fc2 = torch.nn.Linear(input_size, hidden_size) self.softmax1 = torch.nn.Softmax(dim=1) self.softmax2 = torch.nn.Softmax(dim=1) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
chethanpk/onnxruntime
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
false
12202
[ "MIT" ]
0
c2435d24ecbeededf1dc50187ab3bd11ad4a6994
https://github.com/chethanpk/onnxruntime/tree/c2435d24ecbeededf1dc50187ab3bd11ad4a6994
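A brief sketch, assuming the eager class from this record is importable, showing that the two heads are independent softmaxes over dim=1 of the summed inputs; input shapes follow get_inputs().

import torch

model = NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(
    input_size=4, hidden_size=4, num_classes=4)
a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out1, out2 = model(a, b)
# both outputs are softmax-normalised along dim=1, so they sum to 1 on that axis
print(out1.shape, torch.allclose(out1.sum(dim=1), torch.ones(4, 4, 4)))
print(out2.shape, torch.allclose(out2.sum(dim=1), torch.ones(4, 4, 4)))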
ToRGB
import math import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if len(k.shape) == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, minor, in_h, in_w = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, minor, in_h, 1, in_w, 1) out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) out = out.view(-1, minor, in_h * up_y, in_w * up_x) out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max( pad_y1, 0)]) out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[3] - max(-pad_x1, 0)] out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[ 1], pad[0], pad[1]) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = 
Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = math.sqrt(1) / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) if style_dim is not None and style_dim > 0: self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape if style is not None: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) else: style = torch.ones(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_4, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf5, primals_2, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if len(k.shape) == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, minor, in_h, in_w = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, minor, in_h, 1, in_w, 1) out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) out = out.view(-1, minor, in_h * up_y, in_w * up_x) out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max( pad_y1, 0)]) out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[3] - max(-pad_x1, 0)] out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[ 1], pad[0], pad[1]) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class 
ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = math.sqrt(1) / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) if style_dim is not None and style_dim > 0: self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape if style is not None: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) else: style = torch.ones(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_4 = self.conv.modulation.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
bomtorazek/contrastive-unpaired-translation
ToRGB
false
12203
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
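A minimal sketch of the ToRGB call signature, assuming the classes in this record are importable; the feature and style shapes follow get_inputs(), while the 2x2 skip image is an illustrative assumption chosen so the built-in Upsample doubles it to the 4x4 output resolution.

import torch

to_rgb = ToRGB(in_channel=4, style_dim=4)       # upsample=True builds the skip path
feat = torch.rand(4, 4, 4, 4)                   # (batch, in_channel, H, W)
style = torch.rand(4, 4)                        # (batch, style_dim)
rgb = to_rgb(feat, style)                       # modulated 1x1 conv -> (4, 3, 4, 4)
skip = torch.rand(4, 3, 2, 2)                   # previous RGB at half resolution (assumed)
rgb_skip = to_rgb(feat, style, skip=skip)       # skip blur-upsampled to 4x4, then added
print(rgb.shape, rgb_skip.shape)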
TransformerLayer
import torch from torch import nn import torch.nn.functional as nnf from typing import Optional class MlpTransformer(nn.Module): def __init__(self, in_dim, h_dim, out_d: 'Optional[int]'=None, act=nnf. relu, dropout=0.0): super().__init__() out_d = out_d if out_d is not None else in_dim self.fc1 = nn.Linear(in_dim, h_dim) self.act = act self.fc2 = nn.Linear(h_dim, out_d) self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.dropout(x) x = self.fc2(x) x = self.dropout(x) return x class MultiHeadAttention(nn.Module): def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.0): super().__init__() self.num_heads = num_heads head_dim = dim_self // num_heads self.scale = head_dim ** -0.5 self.to_queries = nn.Linear(dim_self, dim_self, bias=bias) self.to_keys_values = nn.Linear(dim_ref, dim_self * 2, bias=bias) self.project = nn.Linear(dim_self, dim_self) self.dropout = nn.Dropout(dropout) def forward(self, x, y=None, mask=None): y = y if y is not None else x b, n, c = x.shape _, m, _d = y.shape queries = self.to_queries(x).reshape(b, n, self.num_heads, c // self.num_heads) keys_values = self.to_keys_values(y).reshape(b, m, 2, self. num_heads, c // self.num_heads) keys, values = keys_values[:, :, 0], keys_values[:, :, 1] attention = torch.einsum('bnhd,bmhd->bnmh', queries, keys) * self.scale if mask is not None: if mask.dim() == 2: mask = mask.unsqueeze(1) attention = attention.masked_fill(mask.unsqueeze(3), float('-inf')) attention = attention.softmax(dim=2) out = torch.einsum('bnmh,bmhd->bnhd', attention, values).reshape(b, n, c) out = self.project(out) return out, attention class TransformerLayer(nn.Module): def forward_with_attention(self, x, y=None, mask=None): x_, attention = self.attn(self.norm1(x), y, mask) x = x + x_ x = x + self.mlp(self.norm2(x)) return x, attention def forward(self, x, y=None, mask=None): x = x + self.attn(self.norm1(x), y, mask)[0] x = x + self.mlp(self.norm2(x)) return x def __init__(self, dim_self, dim_ref, num_heads, mlp_ratio=4.0, bias= False, dropout=0.0, act=nnf.relu, norm_layer: 'nn.Module'=nn.LayerNorm ): super().__init__() self.norm1 = norm_layer(dim_self) self.attn = MultiHeadAttention(dim_self, dim_ref, num_heads, bias= bias, dropout=dropout) self.norm2 = norm_layer(dim_self) self.mlp = MlpTransformer(dim_self, int(dim_self * mlp_ratio), act= act, dropout=dropout) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim_self': 4, 'dim_ref': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as nnf from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp3 tmp8 = triton_helpers.maximum(tmp4, tmp7) tmp10 = tmp0 * tmp9 tmp11 = tmp10 * tmp3 tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp0 * tmp13 tmp15 = tmp14 * tmp3 tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp17 = tmp4 - tmp16 tmp18 = tmp17 * tmp3 tmp19 = tl_math.exp(tmp18) tmp20 = tmp7 - tmp16 tmp21 = tmp20 * tmp3 tmp22 = tl_math.exp(tmp21) tmp23 = tmp19 + tmp22 tmp24 = tmp11 - tmp16 tmp25 = tmp24 * tmp3 tmp26 = tl_math.exp(tmp25) tmp27 
= tmp23 + tmp26 tmp28 = tmp15 - tmp16 tmp29 = tmp28 * tmp3 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + x3, tmp16, xmask) tl.store(out_ptr1 + x3, tmp31, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x2 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 8 * x0 + 32 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp3 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp10, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 8 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 
tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, 
num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_3[grid(256)](buf3, buf4, buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf4, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf11) buf12 = buf1 del buf1 buf13 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_3, buf11, primals_7, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_3, buf11, primals_7, buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_9 buf15 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 16), (64, 16, 1), 0) del buf15 buf19 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_8[grid(256)](buf16, primals_11, buf19, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 triton_poi_fused_add_9[grid(64)](buf18, primals_3, buf11, primals_7, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 return buf18, primals_3, primals_7, primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (4, 1, 4, 4, 1), (32, 1, 8, 1, 1), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf16, (16, 16), (16, 1), 0 ), primals_12, buf19, primals_10, primals_6, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 
0), primals_5, primals_4 class MlpTransformer(nn.Module): def __init__(self, in_dim, h_dim, out_d: 'Optional[int]'=None, act=nnf. relu, dropout=0.0): super().__init__() out_d = out_d if out_d is not None else in_dim self.fc1 = nn.Linear(in_dim, h_dim) self.act = act self.fc2 = nn.Linear(h_dim, out_d) self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.dropout(x) x = self.fc2(x) x = self.dropout(x) return x class MultiHeadAttention(nn.Module): def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.0): super().__init__() self.num_heads = num_heads head_dim = dim_self // num_heads self.scale = head_dim ** -0.5 self.to_queries = nn.Linear(dim_self, dim_self, bias=bias) self.to_keys_values = nn.Linear(dim_ref, dim_self * 2, bias=bias) self.project = nn.Linear(dim_self, dim_self) self.dropout = nn.Dropout(dropout) def forward(self, x, y=None, mask=None): y = y if y is not None else x b, n, c = x.shape _, m, _d = y.shape queries = self.to_queries(x).reshape(b, n, self.num_heads, c // self.num_heads) keys_values = self.to_keys_values(y).reshape(b, m, 2, self. num_heads, c // self.num_heads) keys, values = keys_values[:, :, 0], keys_values[:, :, 1] attention = torch.einsum('bnhd,bmhd->bnmh', queries, keys) * self.scale if mask is not None: if mask.dim() == 2: mask = mask.unsqueeze(1) attention = attention.masked_fill(mask.unsqueeze(3), float('-inf')) attention = attention.softmax(dim=2) out = torch.einsum('bnmh,bmhd->bnhd', attention, values).reshape(b, n, c) out = self.project(out) return out, attention class TransformerLayerNew(nn.Module): def forward_with_attention(self, x, y=None, mask=None): x_, attention = self.attn(self.norm1(x), y, mask) x = x + x_ x = x + self.mlp(self.norm2(x)) return x, attention def __init__(self, dim_self, dim_ref, num_heads, mlp_ratio=4.0, bias= False, dropout=0.0, act=nnf.relu, norm_layer: 'nn.Module'=nn.LayerNorm ): super().__init__() self.norm1 = norm_layer(dim_self) self.attn = MultiHeadAttention(dim_self, dim_ref, num_heads, bias= bias, dropout=dropout) self.norm2 = norm_layer(dim_self) self.mlp = MlpTransformer(dim_self, int(dim_self * mlp_ratio), act= act, dropout=dropout) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attn.to_queries.weight primals_5 = self.attn.to_keys_values.weight primals_6 = self.attn.project.weight primals_7 = self.attn.project.bias primals_8 = self.norm2.weight primals_9 = self.norm2.bias primals_10 = self.mlp.fc1.weight primals_11 = self.mlp.fc1.bias primals_12 = self.mlp.fc2.weight primals_13 = self.mlp.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
bpiyush/CLIP_prefix_caption-video
TransformerLayer
false
12204
[ "MIT" ]
0
3f6a4b8c841189e20b82fd4de127681424311599
https://github.com/bpiyush/CLIP_prefix_caption-video/tree/3f6a4b8c841189e20b82fd4de127681424311599
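A short sketch, assuming TransformerLayer from this record is importable, exercising both forward paths on the (4, 4, 4) input from get_inputs(); the attention layout (batch, query, key, heads) follows the einsum in MultiHeadAttention.

import torch

layer = TransformerLayer(dim_self=4, dim_ref=4, num_heads=4)
x = torch.rand(4, 4, 4)                         # (batch, tokens, dim)
y = layer(x)                                    # pre-norm attention + MLP with residuals
out, attn = layer.forward_with_attention(x)
print(y.shape, out.shape, attn.shape)           # (4,4,4), (4,4,4), (4,4,4,4)
# each query's attention weights sum to 1 over the key dimension (dim=2)
print(torch.allclose(attn.sum(dim=2), torch.ones(4, 4, 4)))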
LogitCosineDistance
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class LogitCosineDistance(torch.nn.Module): def forward(self, a, b): return torch.logit(0.5 - 0.5 * dot_product(a, b, normalize=True)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_logit_mul_rsub_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp1 - tmp2 tmp4 = -1.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 2.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tmp7 / tmp9 tmp11 = tl_math.log(tmp10) tl.store(in_out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf0 del buf1 buf3 = buf2 del buf2 triton_poi_fused_logit_mul_rsub_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class LogitCosineDistanceNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chen-yuxuan/flair
LogitCosineDistance
false
12205
[ "MIT" ]
0
480d2c9afd66ab8d3bf40a676917e84dba3c4cee
https://github.com/chen-yuxuan/flair/tree/480d2c9afd66ab8d3bf40a676917e84dba3c4cee
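A small sketch, assuming LogitCosineDistance above is importable, spelling out the mapping with exact basis vectors: identical directions give logit(0) = -inf, orthogonal directions give logit(0.5) = 0, and opposite directions give logit(1) = +inf, so the value decreases with similarity.

import torch

dist = LogitCosineDistance()
e1 = torch.tensor([[1.0, 0.0]])
e2 = torch.tensor([[0.0, 1.0]])
print(dist(e1, e2))       # orthogonal: cosine 0  -> logit(0.5) = 0
print(dist(e1, e1))       # identical:  cosine 1  -> logit(0)   = -inf
print(dist(e1, -e1))      # opposite:   cosine -1 -> logit(1)   = +inf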
EuclideanDistance
import torch from torch import Tensor import torch.utils.data.dataloader from torch import nn import torch.nn def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class EuclideanDistance(nn.Module): """Implement a EuclideanDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ _dist = [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range( mat_2.size(0))] dist = torch.stack(_dist, dim=1) return dist def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x0 = xindex % 4 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp10 = tl.load(in_ptr1 + (16 + x0 + 4 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp14 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp15 = tl.load(in_ptr1 + (32 + x0 + 4 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.load(in_ptr0 + (48 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (48 + x0 + 4 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp18 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp4, tmp23, tmp24) tmp26 = tmp0 >= tmp3 tmp27 = tl.full([1], 8, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (64 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp30 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp35 = tl.load(in_ptr1 + (80 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp34 - tmp35 tmp37 = tmp36 * tmp36 tmp38 = tmp33 + tmp37 tmp39 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp40 = tl.load(in_ptr1 + (96 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tmp39 - tmp40 tmp42 = tmp41 * tmp41 tmp43 = tmp38 + tmp42 tmp44 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp45 = tl.load(in_ptr1 + (112 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp44 - tmp45 tmp47 = tmp46 * tmp46 tmp48 = tmp43 + tmp47 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp29, tmp48, tmp49) tmp51 = tmp0 >= tmp27 tmp52 = tl.full([1], 12, tl.int64) tmp53 = tmp0 < tmp52 tmp54 = tmp51 & tmp53 tmp55 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + (128 + x0 + 4 * (-8 + x1)), tmp54 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tmp55 - tmp56 tmp58 = tmp57 * tmp57 tmp59 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp60 = tl.load(in_ptr1 + (144 + x0 + 4 * (-8 + x1)), tmp54 & 
xmask, eviction_policy='evict_last', other=0.0) tmp61 = tmp59 - tmp60 tmp62 = tmp61 * tmp61 tmp63 = tmp58 + tmp62 tmp64 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp65 = tl.load(in_ptr1 + (160 + x0 + 4 * (-8 + x1)), tmp54 & xmask, eviction_policy='evict_last', other=0.0) tmp66 = tmp64 - tmp65 tmp67 = tmp66 * tmp66 tmp68 = tmp63 + tmp67 tmp69 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp70 = tl.load(in_ptr1 + (176 + x0 + 4 * (-8 + x1)), tmp54 & xmask, eviction_policy='evict_last', other=0.0) tmp71 = tmp69 - tmp70 tmp72 = tmp71 * tmp71 tmp73 = tmp68 + tmp72 tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype) tmp75 = tl.where(tmp54, tmp73, tmp74) tmp76 = tmp0 >= tmp52 tl.full([1], 16, tl.int64) tmp79 = tl.load(in_ptr0 + (x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp80 = tl.load(in_ptr1 + (192 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp81 = tmp79 - tmp80 tmp82 = tmp81 * tmp81 tmp83 = tl.load(in_ptr0 + (16 + x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp84 = tl.load(in_ptr1 + (208 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tmp83 - tmp84 tmp86 = tmp85 * tmp85 tmp87 = tmp82 + tmp86 tmp88 = tl.load(in_ptr0 + (32 + x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp89 = tl.load(in_ptr1 + (224 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp90 = tmp88 - tmp89 tmp91 = tmp90 * tmp90 tmp92 = tmp87 + tmp91 tmp93 = tl.load(in_ptr0 + (48 + x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp94 = tl.load(in_ptr1 + (240 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp95 = tmp93 - tmp94 tmp96 = tmp95 * tmp95 tmp97 = tmp92 + tmp96 tmp98 = tl.full(tmp97.shape, 0.0, tmp97.dtype) tmp99 = tl.where(tmp76, tmp97, tmp98) tmp100 = tl.where(tmp54, tmp75, tmp99) tmp101 = tl.where(tmp29, tmp50, tmp100) tmp102 = tl.where(tmp4, tmp25, tmp101) tl.store(out_ptr0 + x3, tmp102, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class EuclideanDistanceNew(nn.Module): """Implement a EuclideanDistance object.""" def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chen-yuxuan/flair
EuclideanDistance
false
12206
[ "MIT" ]
0
480d2c9afd66ab8d3bf40a676917e84dba3c4cee
https://github.com/chen-yuxuan/flair/tree/480d2c9afd66ab8d3bf40a676917e84dba3c4cee
NetworkExtension
import torch
import torch.utils
import torch
import torch.nn as nn


class NetworkExtension(nn.Module):

    def __init__(self, orig_num_classes, num_classes, auxiliary):
        super(NetworkExtension, self).__init__()
        self._auxiliary = auxiliary
        self.classifier = nn.Linear(orig_num_classes, num_classes)

    def forward(self, logits_logits_aux):
        logits = logits_logits_aux[0]
        logits_aux = logits_logits_aux[1]
        if self._auxiliary and self.training:
            logits_aux = torch.sigmoid(self.classifier(logits_aux))
        logits = torch.sigmoid(self.classifier(logits))
        return logits, logits_aux


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'orig_num_classes': 4, 'num_classes': 4, 'auxiliary': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_1, (4, 4, 4), (16, 4, 1), 64 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf1 class NetworkExtensionNew(nn.Module): def __init__(self, orig_num_classes, num_classes, auxiliary): super(NetworkExtensionNew, self).__init__() self._auxiliary = auxiliary self.classifier = nn.Linear(orig_num_classes, num_classes) def forward(self, input_0): primals_2 = self.classifier.weight primals_3 = self.classifier.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
amelieEmily/RobustDARTS
NetworkExtension
false
12207
[ "Apache-2.0" ]
0
b26e127c6e9c330258786f5eb77b17d367f546ff
https://github.com/amelieEmily/RobustDARTS/tree/b26e127c6e9c330258786f5eb77b17d367f546ff
SelfAttn
import torch
from torch import nn
from torch.nn import functional as F


class SelfAttn(nn.Module):
    """
    self-attention with learnable parameters
    """

    def __init__(self, dhid):
        super().__init__()
        self.scorer = nn.Linear(dhid, 1)

    def forward(self, inp):
        scores = F.softmax(self.scorer(inp), dim=1)
        cont = scores.transpose(1, 2).bmm(inp).squeeze(1)
        return cont


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dhid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), primals_3, out=buf4) del buf3 return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1 class SelfAttnNew(nn.Module): """ self-attention with learnable parameters """ def __init__(self, dhid): super().__init__() self.scorer = nn.Linear(dhid, 1) def forward(self, input_0): primals_1 = self.scorer.weight primals_2 = self.scorer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
caisarl76/alfred
SelfAttn
false
12208
[ "MIT" ]
0
b73bdc1651e14c02440938b639fa3c7f3ab3d321
https://github.com/caisarl76/alfred/tree/b73bdc1651e14c02440938b639fa3c7f3ab3d321
BinaryDiceLoss
import torch
import torch.nn as nn


class BinaryDiceLoss(nn.Module):

    def __init__(self):
        super(BinaryDiceLoss, self).__init__()

    def forward(self, input, targets):
        N = targets.size()[0]
        smooth = 1
        input_flat = input.view(N, -1)
        targets_flat = targets.view(N, -1)
        intersection = input_flat * targets_flat
        N_dice_eff = (2 * intersection.sum(1) + smooth) / (input_flat.sum(1) +
            targets_flat.sum(1) + smooth)
        loss = 1 - N_dice_eff.sum() / N
        return loss

    def dice(self, prec, label):
        smooth = 1
        input_flat = prec.view(1, -1)
        targets_flat = label.view(1, -1)
        intersection = input_flat * targets_flat
        d = (2 * intersection.sum(1) + smooth) / (input_flat.sum(1) +
            targets_flat.sum(1) + smooth)
        return d.sum()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) tl.store(out_ptr2 + x0, tmp14, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp3 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class BinaryDiceLossNew(nn.Module): def __init__(self): super(BinaryDiceLossNew, self).__init__() def dice(self, prec, label): smooth = 1 input_flat = prec.view(1, -1) targets_flat = label.view(1, -1) intersection = input_flat * targets_flat d = (2 * intersection.sum(1) + smooth) / (input_flat.sum(1) + targets_flat.sum(1) + smooth) return d.sum() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chenkarl/kits19
BinaryDiceLoss
false
12209
[ "MIT" ]
0
7fa912320a23c6bf649566a1509aa493656b24c1
https://github.com/chenkarl/kits19/tree/7fa912320a23c6bf649566a1509aa493656b24c1
ReshapeF
import torch
import torch.utils.data
import torch
import torch.nn as nn


class Normalize(nn.Module):

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        out = x.div(norm + 1e-07)
        return out


class ReshapeF(nn.Module):

    def __init__(self):
        super(ReshapeF, self).__init__()
        model = [nn.AdaptiveAvgPool2d(4)]
        self.model = nn.Sequential(*model)
        self.l2norm = Normalize(2)

    def forward(self, x):
        x = self.model(x)
        x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
        return self.l2norm(x_reshape)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + 64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + 64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + 64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x1 + 4 * y0), tmp15, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 return buf0, class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out class ReshapeFNew(nn.Module): def __init__(self): super(ReshapeFNew, self).__init__() model = [nn.AdaptiveAvgPool2d(4)] self.model = nn.Sequential(*model) self.l2norm = Normalize(2) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bomtorazek/contrastive-unpaired-translation
ReshapeF
false
12210
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
VAE
import torch
import torch.nn as nn
import torch.nn.functional as F


class VAE(nn.Module):

    def __init__(self, state_dim, action_dim, latent_dim, max_action, device):
        super(VAE, self).__init__()
        self.e1 = nn.Linear(state_dim + action_dim, 750)
        self.e2 = nn.Linear(750, 750)
        self.mean = nn.Linear(750, latent_dim)
        self.log_std = nn.Linear(750, latent_dim)
        self.d1 = nn.Linear(state_dim + latent_dim, 750)
        self.d2 = nn.Linear(750, 750)
        self.d3 = nn.Linear(750, action_dim)
        self.max_action = max_action
        self.latent_dim = latent_dim
        self.device = device

    def forward(self, state, action):
        z = F.relu(self.e1(torch.cat([state, action], 1)))
        z = F.relu(self.e2(z))
        mean = self.mean(z)
        log_std = self.log_std(z).clamp(-4, 15)
        std = torch.exp(log_std)
        z = mean + std * torch.randn_like(std)
        u = self.decode(state, z)
        return u, mean, std

    def decode(self, state, z=None):
        if z is None:
            z = torch.randn((state.shape[0], self.latent_dim)).clamp(-0.5, 0.5)
        a = F.relu(self.d1(torch.cat([state, z], 1)))
        a = F.relu(self.d2(a))
        return self.max_action * torch.tanh(self.d3(a))


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4, 'latent_dim': 4,
        'max_action': 4, 'device': 0}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 3000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 750 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clamp_exp_ge_le_logical_and_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -4.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 >= tmp3 tmp9 = tmp2 <= tmp5 tmp10 = tmp8 & tmp9 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr3 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: 
tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (750, 8), (8, 1)) assert_size_stride(primals_4, (750,), (1,)) assert_size_stride(primals_5, (750, 750), (750, 1)) assert_size_stride(primals_6, (750,), (1,)) assert_size_stride(primals_7, (4, 750), (750, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 750), (750, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (750, 8), (8, 1)) assert_size_stride(primals_12, (750,), (1,)) assert_size_stride(primals_13, (750, 750), (750, 1)) assert_size_stride(primals_14, (750,), (1,)) assert_size_stride(primals_15, (4, 750), (750, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 750), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(3000)](buf2, primals_4, 3000, XBLOCK= 128, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (750, 750), ( 1, 750), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(3000)](buf4, primals_6, 3000, XBLOCK= 128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (750, 4), (1, 750), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_9, (750, 4), (1, 750), 0), out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_exp_ge_le_logical_and_2[grid(16)](buf6, primals_10, buf7, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf8 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_3[grid(32)](primals_1, buf5, buf7, buf9, buf10, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf11 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (8, 750), ( 1, 8), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_1[grid(3000)](buf12, primals_12, 3000, XBLOCK =128, num_warps=4, num_stages=1) del primals_12 buf13 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (750, 750), (1, 750), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_1[grid(3000)](buf14, primals_14, 3000, XBLOCK =128, num_warps=4, 
num_stages=1) del primals_14 buf15 = buf6 del buf6 extern_kernels.addmm(primals_16, buf14, reinterpret_tensor( primals_15, (750, 4), (1, 750), 0), alpha=1, beta=1, out=buf15) del primals_16 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_tanh_4[grid(16)](buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) return (buf16, buf5, buf7, buf0, buf2, buf4, buf7, buf9, buf10, buf12, buf14, buf15, primals_15, primals_13, primals_11, buf17, primals_9, primals_7, primals_5) class VAENew(nn.Module): def __init__(self, state_dim, action_dim, latent_dim, max_action, device): super(VAENew, self).__init__() self.e1 = nn.Linear(state_dim + action_dim, 750) self.e2 = nn.Linear(750, 750) self.mean = nn.Linear(750, latent_dim) self.log_std = nn.Linear(750, latent_dim) self.d1 = nn.Linear(state_dim + latent_dim, 750) self.d2 = nn.Linear(750, 750) self.d3 = nn.Linear(750, action_dim) self.max_action = max_action self.latent_dim = latent_dim self.device = device def decode(self, state, z=None): if z is None: z = torch.randn((state.shape[0], self.latent_dim)).clamp(-0.5, 0.5) a = F.relu(self.d1(torch.cat([state, z], 1))) a = F.relu(self.d2(a)) return self.max_action * torch.tanh(self.d3(a)) def forward(self, input_0, input_1): primals_3 = self.e1.weight primals_4 = self.e1.bias primals_5 = self.e2.weight primals_6 = self.e2.bias primals_7 = self.mean.weight primals_8 = self.mean.bias primals_9 = self.log_std.weight primals_10 = self.log_std.bias primals_11 = self.d1.weight primals_12 = self.d1.bias primals_13 = self.d2.weight primals_14 = self.d2.bias primals_15 = self.d3.weight primals_16 = self.d3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0], output[1], output[2]
cedesu/BCQ
VAE
false
12211
[ "MIT" ]
0
424548510349a85c31809431494dcc6f64b611ba
https://github.com/cedesu/BCQ/tree/424548510349a85c31809431494dcc6f64b611ba
Conv_Q
import torch
import torch.nn as nn
import torch.nn.functional as F


class Conv_Q(nn.Module):

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.q1 = nn.Linear(3136, 512)
        self.q2 = nn.Linear(512, num_actions)
        self.i1 = nn.Linear(3136, 512)
        self.i2 = nn.Linear(512, num_actions)

    def forward(self, state):
        c = F.relu(self.c1(state))
        c = F.relu(self.c2(c))
        c = F.relu(self.c3(c))
        q = F.relu(self.q1(c.reshape(-1, 3136)))
        i = F.relu(self.i1(c.reshape(-1, 3136)))
        i = self.i2(i)
        return self.q2(q)


def get_inputs():
    return [torch.rand([4, 4, 144, 144])]


def get_init_inputs():
    return [[], {'frames': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1225 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (512, 3136), (3136, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (512, 3136), (3136, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (4, 512), (512, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 512), (512, 1)) assert_size_stride(primals_15, (4,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2, 156800, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 14, 14), (12544, 196, 14, 1)) buf5 = buf4 del buf4 buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(50176)]( buf5, primals_7, buf9, 50176, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_7 buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0 ), reinterpret_tensor(primals_8, (3136, 512), (1, 3136), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_3[grid(8192)](buf7, primals_9, 8192, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, buf7, reinterpret_tensor( primals_14, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf8) del primals_15 return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf3, reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0), buf7, primals_14, primals_8, buf9) class Conv_QNew(nn.Module): def __init__(self, frames, num_actions): super(Conv_QNew, self).__init__() self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4) self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.q1 = nn.Linear(3136, 512) self.q2 = nn.Linear(512, num_actions) self.i1 = nn.Linear(3136, 512) self.i2 = nn.Linear(512, num_actions) def forward(self, input_0): primals_1 = self.c1.weight primals_2 = self.c1.bias primals_4 = self.c2.weight primals_5 = self.c2.bias primals_6 = self.c3.weight primals_7 = self.c3.bias primals_8 = self.q1.weight primals_9 = self.q1.bias primals_12 = self.q2.weight primals_13 = self.q2.bias primals_10 = self.i1.weight primals_11 = self.i1.bias primals_14 = self.i2.weight primals_15 = self.i2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
cedesu/BCQ
Conv_Q
false
12212
[ "MIT" ]
0
424548510349a85c31809431494dcc6f64b611ba
https://github.com/cedesu/BCQ/tree/424548510349a85c31809431494dcc6f64b611ba
OutputTransition
import torch
import torch.nn as nn


class OutputTransition(nn.Module):

    def __init__(self, out_ch):
        super(OutputTransition, self).__init__()
        self.up_conv = nn.Conv2d(64, out_ch, 1)

    def forward(self, x):
        out = self.up_conv(x)
        return out


def get_inputs():
    return [torch.rand([4, 64, 64, 64])]


def get_init_inputs():
    return [[], {'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(65536)](buf1, primals_2, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class OutputTransitionNew(nn.Module): def __init__(self, out_ch): super(OutputTransitionNew, self).__init__() self.up_conv = nn.Conv2d(64, out_ch, 1) def forward(self, input_0): primals_1 = self.up_conv.weight primals_2 = self.up_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
chenkarl/kits19
OutputTransition
false
12213
[ "MIT" ]
0
7fa912320a23c6bf649566a1509aa493656b24c1
https://github.com/chenkarl/kits19/tree/7fa912320a23c6bf649566a1509aa493656b24c1
RNN
import torch
import torch.nn as nn
from torch.autograd import Variable


class RNN(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax()

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return Variable(torch.zeros(1, self.hidden_size))


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf3 return buf4, buf1, buf0, buf4 class RNNNew(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNNNew, self).__init__() self.hidden_size = hidden_size self.i2h = nn.Linear(input_size + hidden_size, hidden_size) self.i2o = nn.Linear(input_size + hidden_size, output_size) self.softmax = nn.LogSoftmax() def initHidden(self): return Variable(torch.zeros(1, self.hidden_size)) def forward(self, input_0, input_1): primals_3 = self.i2h.weight primals_4 = self.i2h.bias primals_5 = self.i2o.weight primals_6 = self.i2o.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
chenyuntc/practical-pytorch
RNN
false
12214
[ "MIT" ]
0
42cbde5275d37bf3f3623a85fd71f13069d95089
https://github.com/chenyuntc/practical-pytorch/tree/42cbde5275d37bf3f3623a85fd71f13069d95089
TripletLoss
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *


def _batch_hard(mat_distance, mat_similarity, indice=False):
    sorted_mat_distance, positive_indices = torch.sort(mat_distance + -
        9999999.0 * (1 - mat_similarity), dim=1, descending=True)
    hard_p = sorted_mat_distance[:, 0]
    hard_p_indice = positive_indices[:, 0]
    sorted_mat_distance, negative_indices = torch.sort(mat_distance +
        9999999.0 * mat_similarity, dim=1, descending=False)
    hard_n = sorted_mat_distance[:, 0]
    hard_n_indice = negative_indices[:, 0]
    if indice:
        return hard_p, hard_n, hard_p_indice, hard_n_indice
    return hard_p, hard_n


def euclidean_dist(x, y):
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist.addmm_(1, -2, x, y.t())
    dist = dist.clamp(min=1e-12).sqrt()
    return dist


class TripletLoss(nn.Module):
    """
    Compute Triplet loss augmented with Batch Hard
    Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification'
    """

    def __init__(self, margin, normalize_feature=False):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.normalize_feature = normalize_feature
        self.margin_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, emb, label):
        if self.normalize_feature:
            emb = F.normalize(emb)
        mat_dist = euclidean_dist(emb, emb)
        assert mat_dist.size(0) == mat_dist.size(1)
        N = mat_dist.size(0)
        mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
        dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
        assert dist_an.size(0) == dist_ap.size(0)
        y = torch.ones_like(dist_ap)
        loss = self.margin_loss(dist_an, dist_ap, y)
        prec = (dist_an.data > dist_ap.data).sum() * 1.0 / y.size(0)
        return loss, prec


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'margin': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp29 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = tmp11 + tmp22 tmp24 = tmp0 + tmp23 tmp25 = 1e-12 tmp26 = triton_helpers.maximum(tmp24, tmp25) tmp27 = libdevice.sqrt(tmp26) tmp30 = tmp28 == tmp29 tmp31 = tmp30.to(tl.float32) tmp32 = 1.0 tmp33 = tmp32 - tmp31 tmp34 = -9999999.0 tmp35 = tmp33 * tmp34 tmp36 = tmp27 + tmp35 tmp37 = r1 tmp38 = tmp37.to(tl.int16) tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41, _tmp42 = triton_helpers.sort_with_index(tmp39, tmp40, None, 1, stable=False, descending=True) tmp43 = 9999999.0 tmp44 = tmp31 * tmp43 tmp45 = tmp27 + tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp47, _tmp48 = triton_helpers.sort_with_index(tmp46, tmp40, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp24, xmask) tl.store(out_ptr0 + (r1 + 4 * x0), tmp41, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp47, xmask) @triton.jit def triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 
tmp4 = tmp3 * tmp2 tmp5 = 4.0 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = tmp0 > tmp1 tmp13 = tmp12.to(tl.int64) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp16.to(tl.float32) tmp18 = 1.0 tmp19 = tmp17 * tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tmp22 = tmp11 / tmp5 tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0[grid(4)]( buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 buf6 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) buf8 = buf6 del buf6 triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1[grid(1)]( buf8, buf4, buf2, buf9, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf4 return buf8, buf9 def _batch_hard(mat_distance, mat_similarity, indice=False): sorted_mat_distance, positive_indices = torch.sort(mat_distance + - 9999999.0 * (1 - mat_similarity), dim=1, descending=True) hard_p = sorted_mat_distance[:, 0] hard_p_indice = positive_indices[:, 0] sorted_mat_distance, negative_indices = torch.sort(mat_distance + 9999999.0 * mat_similarity, dim=1, descending=False) hard_n = sorted_mat_distance[:, 0] hard_n_indice = negative_indices[:, 0] if indice: return hard_p, hard_n, hard_p_indice, hard_n_indice return hard_p, hard_n def euclidean_dist(x, y): m, n = x.size(0), y.size(0) xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() dist = xx + yy dist.addmm_(1, -2, x, y.t()) dist = dist.clamp(min=1e-12).sqrt() return dist class TripletLossNew(nn.Module): """ Compute Triplet loss augmented with Batch Hard Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification' """ def __init__(self, margin, normalize_feature=False): super(TripletLossNew, self).__init__() self.margin = margin self.normalize_feature = normalize_feature self.margin_loss = nn.MarginRankingLoss(margin=margin) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
chrizandr/MMT
TripletLoss
false
12215
[ "MIT" ]
0
e2bb5984efb165e7ea1ed6080610cfe176344ac0
https://github.com/chrizandr/MMT/tree/e2bb5984efb165e7ea1ed6080610cfe176344ac0
SmallMnistNoDropout
import torch
import torch.nn as nn
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2


class SmallMnistNoDropout(nn.Module):

    def __init__(self):
        super(SmallMnistNoDropout, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.relu2 = nn.ReLU()
        self.fc1 = nn.Linear(320, 50)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(50, 10)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        x = x.view(-1, 320)
        x = self.relu3(self.fc1(x))
        x = self.fc2(x)
        return self.log_softmax(x)


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 250880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 3136 % 20 x0 = xindex % 3136 x3 = xindex // 3136 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 39200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 784 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1)) 
assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (50, 320), (320, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (10, 50), (50, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(144000)](buf1, primals_2, 144000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1)) buf3 = buf2 del buf2 buf10 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(250880)]( buf3, primals_5, buf10, 250880, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((784, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (784, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(39200)](buf5, primals_7, 39200, XBLOCK =256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((784, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf6) del primals_9 buf9 = empty_strided_cuda((784, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_3[grid(784)](buf6, buf9, 784, 10, XBLOCK=128, num_warps=8, num_stages=1) del buf6 return buf9, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3 , (784, 320), (320, 1), 0), buf5, buf9, primals_8, primals_6, buf10 class SmallMnistNoDropoutNew(nn.Module): def __init__(self): super(SmallMnistNoDropoutNew, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.relu2 = nn.ReLU() self.fc1 = nn.Linear(320, 50) self.relu3 = nn.ReLU() self.fc2 = nn.Linear(50, 10) self.log_softmax = nn.LogSoftmax(dim=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
arjunsuresh/aimet
SmallMnistNoDropout
false
12216
[ "BSD-3-Clause" ]
0
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
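A minimal usage sketch for the SmallMnistNoDropout record above, assuming that class definition is in scope; the input shape follows its get_inputs(). It highlights that with 64x64 inputs the view(-1, 320) folds spatial positions into the batch axis, which is why the compiled kernels above operate on 784 rows.

import torch

# Hypothetical illustration; SmallMnistNoDropout is assumed to be defined as in the record above.
model = SmallMnistNoDropout()
x = torch.rand(4, 1, 64, 64)                 # shape taken from get_inputs()
with torch.no_grad():
    out = model(x)
# The 4 x 20 x 56 x 56 activations reshaped to (-1, 320) give 784 rows, not 4.
print(out.shape)                             # torch.Size([784, 10])
# Rows of a log-softmax output exponentiate and sum to 1.
assert torch.allclose(out.exp().sum(dim=1), torch.ones(784), atol=1e-5)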
CosineDistance
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class CosineDistance(torch.nn.Module): def forward(self, a, b): return -dot_product(a, b, normalize=True) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_neg_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = -tmp0 tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf0 del buf1 buf3 = buf2 del buf2 triton_poi_fused_neg_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class CosineDistanceNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chen-yuxuan/flair
CosineDistance
false
12217
[ "MIT" ]
0
480d2c9afd66ab8d3bf40a676917e84dba3c4cee
https://github.com/chen-yuxuan/flair/tree/480d2c9afd66ab8d3bf40a676917e84dba3c4cee
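A small sanity check for the CosineDistance record above, assuming the class and its dot_product helper are in scope; shapes follow get_inputs(). The module returns the negated cosine-similarity matrix, which can be reproduced with normalize plus a matrix product.

import torch
import torch.nn.functional as F

# Hypothetical check; CosineDistance is assumed to be defined as in the record above.
a, b = torch.rand(4, 4), torch.rand(4, 4)
dist = CosineDistance()(a, b)                # dist[i][j] = -cosine_similarity(a[i], b[j])
ref = -(F.normalize(a, p=2, dim=1) @ F.normalize(b, p=2, dim=1).t())
assert torch.allclose(dist, ref, atol=1e-6)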
BertSelfOutput
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class BertSelfOutput(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, hidden_states, input_tensor): if type(hidden_states) == list: hidden_states0 = self.dense0(hidden_states[0]) hidden_states1 = self.dense1(hidden_states[1]) if self.merge: hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1)) else: hidden_states = (hidden_states0 + hidden_states1) / 2 else: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, primals_4, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1 class BertSelfOutputNew(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, input_0, input_1): primals_3 = self.LayerNorm.weight primals_5 = self.LayerNorm.bias primals_2 = self.dense.weight primals_6 = self.dense.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
christophschuhmann/BLIP
BertSelfOutput
false
12218
[ "BSD-3-Clause" ]
0
498f963762db65e7290eea02573e1749f955b3d0
https://github.com/christophschuhmann/BLIP/tree/498f963762db65e7290eea02573e1749f955b3d0
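A minimal sketch for the BertSelfOutput record above, assuming the class is in scope. types.SimpleNamespace stands in for the _mock_config helper, and hidden_dropout_prob is set to 0 here (the record uses 0.5) so the call is deterministic; both substitutions are choices of this sketch, not part of the record.

import torch
from types import SimpleNamespace

# Hypothetical config object; only the three attributes read by BertSelfOutput are provided.
config = SimpleNamespace(hidden_size=4, layer_norm_eps=1e-12, hidden_dropout_prob=0.0)
layer = BertSelfOutput(config)
hidden = torch.rand(4, 4, 4, 4)
residual = torch.rand(4, 4, 4, 4)
out = layer(hidden, residual)                # dense -> dropout -> LayerNorm(hidden + residual)
print(out.shape)                             # torch.Size([4, 4, 4, 4])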
LinearZeros
import torch import torch.nn as nn class LinearZeros(nn.Module): def __init__(self, in_channels, out_channels, logscale_factor=3): super().__init__() self.linear = nn.Linear(in_channels, out_channels) self.linear.weight.data.zero_() self.linear.bias.data.zero_() self.logscale_factor = logscale_factor self.logs = nn.Parameter(torch.zeros(out_channels)) def forward(self, input): output = self.linear(input) return output * torch.exp(self.logs * self.logscale_factor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = 3.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_mul_0[grid(256)](buf0, primals_4, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class LinearZerosNew(nn.Module): def __init__(self, in_channels, out_channels, logscale_factor=3): super().__init__() self.linear = nn.Linear(in_channels, out_channels) self.linear.weight.data.zero_() self.linear.bias.data.zero_() self.logscale_factor = logscale_factor self.logs = nn.Parameter(torch.zeros(out_channels)) def forward(self, input_0): primals_2 = self.logs primals_1 = self.linear.weight primals_4 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
appuzanova/Glow-PyTorch
LinearZeros
false
12219
[ "MIT" ]
0
50316b1b242f0f345b2df9e3e4538cfab5a60895
https://github.com/appuzanova/Glow-PyTorch/tree/50316b1b242f0f345b2df9e3e4538cfab5a60895
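A short property check for the LinearZeros record above, assuming the class is in scope; the input shape follows get_inputs(). Because the weight, bias, and logs parameters all start at zero, the layer initially outputs exactly zero for any input, which is the usual Glow-style zero initialisation.

import torch

# Hypothetical check; LinearZeros is assumed to be defined as in the record above.
layer = LinearZeros(4, 4)
x = torch.rand(4, 4, 4, 4)                   # shape taken from get_inputs()
with torch.no_grad():
    out = layer(x)                           # linear(x) * exp(logs * 3)
assert torch.equal(out, torch.zeros_like(out))   # zero weights and exp(0) = 1 give an all-zero output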
Conv2dZeros
import torch import torch.nn as nn def compute_same_pad(kernel_size, stride): if isinstance(kernel_size, int): kernel_size = [kernel_size] if isinstance(stride, int): stride = [stride] assert len(stride) == len(kernel_size ), 'Pass kernel size and stride both as int, or both as equal length iterable' return [(((k - 1) * s + 1) // 2) for k, s in zip(kernel_size, stride)] class Conv2dZeros(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding='same', logscale_factor=3): super().__init__() if padding == 'same': padding = compute_same_pad(kernel_size, stride) elif padding == 'valid': padding = 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.logscale_factor = logscale_factor self.logs = nn.Parameter(torch.zeros(out_channels, 1, 1)) def forward(self, input): output = self.conv(input) return output * torch.exp(self.logs * self.logscale_factor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 def compute_same_pad(kernel_size, stride): if isinstance(kernel_size, int): kernel_size = [kernel_size] if isinstance(stride, int): stride = [stride] assert len(stride) == len(kernel_size ), 'Pass kernel size and stride both as int, or both as equal length iterable' return [(((k - 1) * s + 1) // 2) for k, s in zip(kernel_size, stride)] class Conv2dZerosNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding='same', logscale_factor=3): super().__init__() if padding == 'same': padding = compute_same_pad(kernel_size, stride) elif padding == 'valid': padding = 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.logscale_factor = logscale_factor self.logs = nn.Parameter(torch.zeros(out_channels, 1, 1)) def forward(self, input_0): primals_4 = self.logs primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
appuzanova/Glow-PyTorch
Conv2dZeros
false
12220
[ "MIT" ]
0
50316b1b242f0f345b2df9e3e4538cfab5a60895
https://github.com/appuzanova/Glow-PyTorch/tree/50316b1b242f0f345b2df9e3e4538cfab5a60895
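The same zero-initialisation property holds for the Conv2dZeros record above; this sketch assumes the class and compute_same_pad are in scope and uses the default 3x3 kernel with 'same' padding, so the 4x4 spatial size is preserved.

import torch

# Hypothetical check; Conv2dZeros is assumed to be defined as in the record above.
layer = Conv2dZeros(4, 4)                    # kernel_size=(3, 3), padding='same' resolves to [1, 1]
x = torch.rand(4, 4, 4, 4)
with torch.no_grad():
    out = layer(x)
print(out.shape)                             # torch.Size([4, 4, 4, 4])
assert torch.equal(out, torch.zeros_like(out))   # zero-initialised conv and logs give an all-zero output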
MaxSpatialPoolP4
import torch class MaxSpatialPoolP4(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0): super().__init__() self.inner = torch.nn.MaxPool2d(kernel_size, stride, padding) def forward(self, x): y = x.view(x.size(0), -1, x.size(3), x.size(4)) y = self.inner(y) y = y.view(x.size(0), -1, 4, y.size(2), y.size(3)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x0, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0), class MaxSpatialPoolP4New(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0): super().__init__() self.inner = torch.nn.MaxPool2d(kernel_size, stride, padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
claudio-unipv/groupcnn
MaxSpatialPoolP4
false
12222
[ "MIT" ]
0
2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
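A usage sketch for the MaxSpatialPoolP4 record above, assuming the class is in scope; shapes follow get_inputs() and get_init_inputs(). The module folds the four-element rotation axis into the channel axis, max-pools only over H and W, and unfolds again, so pooling never mixes different rotations.

import torch

# Hypothetical check; MaxSpatialPoolP4 is assumed to be defined as in the record above.
pool = MaxSpatialPoolP4(kernel_size=4)
x = torch.rand(4, 4, 4, 4, 4)                # (batch, channels, 4 rotations, H, W)
y = pool(x)
print(y.shape)                               # torch.Size([4, 4, 4, 1, 1])
# Pooling the full 4x4 window equals a spatial max per (batch, channel, rotation).
assert torch.allclose(y.squeeze(), x.amax(dim=(-2, -1)))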
ModulatedConv2d
import math import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if len(k.shape) == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, minor, in_h, in_w = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, minor, in_h, 1, in_w, 1) out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) out = out.view(-1, minor, in_h * up_y, in_w * up_x) out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max( pad_y1, 0)]) out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[3] - max(-pad_x1, 0)] out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[ 1], pad[0], pad[1]) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = math.sqrt(1) / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) if style_dim is not None and style_dim > 0: self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return 
( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape if style is not None: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) else: style = torch.ones(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_4, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 
1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_2, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if len(k.shape) == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, minor, in_h, in_w = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, minor, in_h, 1, in_w, 1) out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) out = out.view(-1, minor, in_h * up_y, in_w * up_x) out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max( pad_y1, 0)]) out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[3] - max(-pad_x1, 0)] out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[ 1], pad[0], pad[1]) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = math.sqrt(1) / math.sqrt(fan_in) 
self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) if style_dim is not None and style_dim > 0: self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_4 = self.modulation.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
bomtorazek/contrastive-unpaired-translation
ModulatedConv2d
false
12223
[ "BSD-3-Clause" ]
0
07c048038375e1b9a4e464154b8dbc49f5e16ede
https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
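A minimal forward-pass sketch for the ModulatedConv2d record above, assuming the class and its EqualLinear helper are in scope; arguments and shapes follow get_init_inputs() and get_inputs(). With kernel_size=4 and padding=2 the even kernel grows the spatial size from 4 to 5, matching the (4, 4, 5, 5) view in the compiled call above.

import torch

# Hypothetical illustration; ModulatedConv2d is assumed to be defined as in the record above.
conv = ModulatedConv2d(in_channel=4, out_channel=4, kernel_size=4, style_dim=4)
x = torch.rand(4, 4, 4, 4)                   # feature maps
style = torch.rand(4, 4)                     # one style vector per sample
out = conv(x, style)                         # per-sample modulated and demodulated weights via grouped conv
print(out.shape)                             # torch.Size([4, 4, 5, 5])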
Pooler
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class Pooler(nn.Module): """ Do pooling, possibly with a projection beforehand """ def __init__(self, d_inp, project=True, d_proj=512, pool_type='max'): super(Pooler, self).__init__() self.project = nn.Linear(d_inp, d_proj) if project else lambda x: x self.pool_type = pool_type def forward(self, sequence, mask): if len(mask.size()) < 3: mask = mask.unsqueeze(dim=-1) pad_mask = mask == 0 proj_seq = self.project(sequence) if self.pool_type == 'max': proj_seq = proj_seq.masked_fill(pad_mask, -float('inf')) seq_emb = proj_seq.max(dim=1)[0] elif self.pool_type == 'mean': proj_seq = proj_seq.masked_fill(pad_mask, 0) seq_emb = proj_seq.sum(dim=1) / mask.sum(dim=1) elif self.pool_type == 'final': idxs = mask.expand_as(proj_seq).sum(dim=1, keepdim=True).long() - 1 seq_emb = proj_seq.gather(dim=1, index=idxs) return seq_emb @classmethod def from_params(cls, d_inp, d_proj, project=True): return cls(d_inp, d_proj=d_proj, project=project) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_inp': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_max_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 512 x0 = xindex % 512 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, None, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), None, eviction_policy='evict_last' ).to(tl.int1) tmp5 = tl.load(in_ptr1 + (512 + x0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), None, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1024 + x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), None, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr1 + (1536 + x0), None, eviction_policy='evict_last') tmp2 = float('-inf') tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tl.store(out_ptr0 + x2, tmp15, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (512, 4), (4, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor( primals_2, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32) triton_poi_fused_max_1[grid(2048)](buf0, buf1, buf2, 2048, XBLOCK= 128, num_warps=4, num_stages=1) return buf2, primals_4, buf0, buf1 class PoolerNew(nn.Module): """ Do pooling, possibly with a projection beforehand """ def __init__(self, d_inp, project=True, d_proj=512, pool_type='max'): super(PoolerNew, self).__init__() self.project = nn.Linear(d_inp, d_proj) if project else lambda x: x self.pool_type = pool_type @classmethod def from_params(cls, d_inp, d_proj, project=True): return cls(d_inp, d_proj=d_proj, project=project) def forward(self, input_0, input_1): primals_2 = self.project.weight primals_3 = self.project.bias primals_1 = input_0 primals_4 = 
input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
cjmay/jiant
Pooler
false
12224
[ "MIT" ]
0
46e6fa9d0fc73883468646cbd0f36f4166720911
https://github.com/cjmay/jiant/tree/46e6fa9d0fc73883468646cbd0f36f4166720911
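A sketch of the intended (batch, seq_len, dim) usage of the Pooler record above, assuming the class is in scope. The record's get_inputs() passes 2-D tensors, which only exercise a degenerate broadcast, so this example uses a padded 3-D sequence instead; d_proj=8 here is an arbitrary choice of this sketch.

import torch

# Hypothetical illustration; Pooler is assumed to be defined as in the record above.
pooler = Pooler(d_inp=4, d_proj=8, pool_type='max')
seq = torch.rand(2, 5, 4)                    # (batch, time, features)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]], dtype=torch.float32)   # 1 = real token, 0 = padding
emb = pooler(seq, mask)                      # padded positions are filled with -inf before the max
print(emb.shape)                             # torch.Size([2, 8])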
ConvZ2P4
import torch class ConvZ2P4(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = padding def _rotated(self, w): ws = [torch.rot90(w, k, (2, 3)) for k in range(4)] return torch.cat(ws, 1).view(-1, w.size(1), w.size(2), w.size(3)) def forward(self, x): w = self._rotated(self.weight) y = torch.nn.functional.conv2d(x, w, stride=self.stride, padding= self.padding) y = y.view(y.size(0), -1, 4, y.size(2), y.size(3)) if self.bias is not None: y = y + self.bias.view(1, -1, 1, 1, 1) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 16 x3 = xindex // 256 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x6 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (3 + -1 * x1 + 4 * x0 + 16 * (-4 + x2) + 64 * x3), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (15 + -1 * x4 + 16 * (-8 + x2) + 64 * x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (12 + x1 + -4 * x0 + 16 * (-12 + x2) + 64 * x3), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(in_out_ptr0 + x6, tmp22, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) buf1 = reinterpret_tensor(buf0, (16, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_cat_view_0[grid(1024)](buf1, primals_1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(primals_2, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 3, 3), (144, 9, 3, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0) del buf2 triton_poi_fused_add_1[grid(576)](buf3, primals_3, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf3, primals_2, buf1 class ConvZ2P4New(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = 
padding def _rotated(self, w): ws = [torch.rot90(w, k, (2, 3)) for k in range(4)] return torch.cat(ws, 1).view(-1, w.size(1), w.size(2), w.size(3)) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
claudio-unipv/groupcnn
ConvZ2P4
false
12225
[ "MIT" ]
0
2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
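A usage sketch for the ConvZ2P4 record above, assuming the class is in scope; arguments follow get_init_inputs(). The layer convolves the input with the base kernel and its three 90-degree rotations, producing an extra rotation axis of size 4 in the output.

import torch

# Hypothetical illustration; ConvZ2P4 is assumed to be defined as in the record above.
conv = ConvZ2P4(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y = conv(x)                                  # a 4x4 kernel with padding=1 shrinks 4x4 maps to 3x3
print(y.shape)                               # torch.Size([4, 4, 4, 3, 3]) = (batch, out_channels, rotations, H, W)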
Envelope
import torch class Envelope(torch.nn.Module): def __init__(self, exponent): super(Envelope, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, x): p, a, b, c = self.p, self.a, self.b, self.c x_pow_p0 = x.pow(p) x_pow_p1 = x_pow_p0 * x env_val = 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x zero = torch.zeros_like(x) return torch.where(x < 1, env_val, zero) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'exponent': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp0 tmp5 = tmp4 * tmp1 tmp6 = tmp0 * tmp0 tmp7 = tmp6 * tmp6 tmp8 = -15.0 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tmp11 = tmp7 * tmp0 tmp12 = 24.0 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -10.0 tmp16 = tmp11 * tmp15 tmp17 = tmp16 * tmp0 tmp18 = tmp14 + tmp17 tmp19 = 0.0 tmp20 = tl.where(tmp2, tmp18, tmp19) tl.store(out_ptr0 + x0, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0[grid(256) ](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class EnvelopeNew(torch.nn.Module): def __init__(self, exponent): super(EnvelopeNew, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
coopersigrist/Multi-fragment-energy
Envelope
false
12226
[ "MIT" ]
0
c21c1b884f364cf3f2ac71e393464e85ebeccb04
https://github.com/coopersigrist/Multi-fragment-energy/tree/c21c1b884f364cf3f2ac71e393464e85ebeccb04
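A numeric check for the Envelope record above, assuming the class is in scope. With exponent p=4 the coefficients become a=-15, b=24, c=-10 (the same constants that appear in the fused kernel above), so the module evaluates 1/x - 15*x^4 + 24*x^5 - 10*x^6 for x < 1 and 0 otherwise.

import torch

# Hypothetical check; Envelope is assumed to be defined as in the record above.
env = Envelope(exponent=4)
x = torch.rand(4, 4, 4, 4).clamp_min(1e-3)   # keep x in (0, 1) so the x < 1 branch is taken and 1/x stays finite
ref = 1.0 / x - 15 * x**4 + 24 * x**5 - 10 * x**6
assert torch.allclose(env(x), ref, atol=1e-5)
assert torch.all(env(torch.full((2, 2), 2.0)) == 0)   # inputs >= 1 are mapped to zero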
SoftEntropy
import torch from torch import nn import torch.nn.functional as F from torch.nn import * from torch.optim.lr_scheduler import * class SoftEntropy(nn.Module): def __init__(self): super(SoftEntropy, self).__init__() self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, inputs, targets): log_probs = self.logsoftmax(inputs) loss = (-F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + x3, xmask) tmp11 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = -tmp8 tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = tmp9 * tmp23 tl.store(out_ptr0 + x3, tmp24, xmask) 
@triton.jit def triton_per_fused_mean_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mean_sum_3[grid(1)](buf2, buf3, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf3, class SoftEntropyNew(nn.Module): def __init__(self): super(SoftEntropyNew, self).__init__() self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chrizandr/MMT
SoftEntropy
false
12,227
[ "MIT" ]
0
e2bb5984efb165e7ea1ed6080610cfe176344ac0
https://github.com/chrizandr/MMT/tree/e2bb5984efb165e7ea1ed6080610cfe176344ac0
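A minimal smoke test for the compiled SoftEntropyNew wrapper in the record above; it assumes the record's code has been pasted into a Python session and that a CUDA device is available, since the generated call() allocates CUDA buffers:

import torch

crit = SoftEntropyNew()
pred = torch.rand(4, 4, 4, 4, device='cuda')    # score tensor passed as input_0
target = torch.rand(4, 4, 4, 4, device='cuda')  # score tensor passed as input_1
loss = crit(pred, target)  # scalar: soft cross-entropy between the two tensors, softmax/log_softmax over dim 1
print(loss.item())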
FourierFeatures
import math import torch import torch.nn as nn class FourierFeatures(nn.Module): def __init__(self, in_features, out_features, std=1.0): super().__init__() assert out_features % 2 == 0 self.weight = nn.Parameter(torch.randn([out_features // 2, in_features]) * std) def forward(self, input): f = 2 * math.pi * input @ self.weight.T return torch.cat([f.cos(), f.sin()], dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 6.283185307179586 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl_math.cos(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.sin(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1 class FourierFeaturesNew(nn.Module): def __init__(self, in_features, out_features, std=1.0): super().__init__() assert out_features % 2 == 0 self.weight = nn.Parameter(torch.randn([out_features // 2, in_features]) * std) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
corajr/diffusion_gen
FourierFeatures
false
12,228
[ "MIT" ]
0
724377c8e244120cbd1caa75d474e3e14ded9bfa
https://github.com/corajr/diffusion_gen/tree/724377c8e244120cbd1caa75d474e3e14ded9bfa
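A quick usage sketch for the eager FourierFeatures module above, with shapes taken from the record's get_inputs()/get_init_inputs(); the compiled FourierFeaturesNew variant has the same interface but expects CUDA tensors:

import torch

m = FourierFeatures(in_features=4, out_features=4)  # weight has shape (out_features // 2, in_features)
x = torch.rand(4, 4, 4, 4)
y = m(x)  # cat([cos(2*pi*x @ W.T), sin(2*pi*x @ W.T)], dim=-1)
print(y.shape)  # torch.Size([4, 4, 4, 4])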
MaxRotationPoolP4
import torch class MaxRotationPoolP4(torch.nn.Module): def forward(self, x): return x.max(2).values def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxRotationPoolP4New(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
claudio-unipv/groupcnn
MaxRotationPoolP4
false
12,229
[ "MIT" ]
0
2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
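The pooling module above is a one-liner; this sketch just shows the reduction it performs over dim 2 (the rotation axis in the p4 layout, going by the module name) with the record's test shape:

import torch

pool = MaxRotationPoolP4()
x = torch.rand(4, 4, 4, 4)
y = pool(x)
print(y.shape)  # torch.Size([4, 4, 4]) -- dim 2 is max-reduced
assert torch.equal(y, x.max(2).values)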
LinearFeedforward
import torch from torch import nn from torch.nn import functional as F import torch.utils.data class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(F, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearFeedforward(nn.Module): def __init__(self, d_in, d_hid, d_out, activation='relu'): super().__init__() self.feedforward = Feedforward(d_in, d_hid, activation=activation) self.linear = Linear(d_hid, d_out) self.dropout = nn.Dropout(0.2) def forward(self, x): return self.dropout(self.linear(self.feedforward(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4, 'd_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3 class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(F, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearFeedforwardNew(nn.Module): def __init__(self, d_in, d_hid, d_out, activation='relu'): super().__init__() self.feedforward = Feedforward(d_in, d_hid, activation=activation) self.linear = Linear(d_hid, d_out) self.dropout = nn.Dropout(0.2) def forward(self, input_0): primals_2 = self.feedforward.linear.weight primals_3 = self.feedforward.linear.bias primals_4 = self.linear.weight primals_5 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
cristipp/decaNLP
LinearFeedforward
false
12,230
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
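A short sanity check for the eager LinearFeedforward stack above; .eval() is used so the Dropout layers are inactive and the result is deterministic:

import torch

ff = LinearFeedforward(d_in=4, d_hid=4, d_out=4).eval()
x = torch.rand(4, 4, 4, 4)
y = ff(x)       # dropout -> linear -> relu -> linear -> dropout over the last dim; dropouts are no-ops in eval()
print(y.shape)  # torch.Size([4, 4, 4, 4])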
ConvP4
import torch def _grot90(x, k): return torch.rot90(x.roll(k, 2), k, (3, 4)) class ConvP4(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, 4, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = padding def _rotated(self, w): ws = [_grot90(w, k).view(w.size(0), -1, w.size(3), w.size(4)) for k in range(4)] return torch.cat(ws, 1).view(4 * w.size(0), 4 * w.size(1), w.size(3 ), w.size(4)) def forward(self, x): x = x.view(x.size(0), -1, x.size(3), x.size(4)) w = self._rotated(self.weight) y = torch.nn.functional.conv2d(x, w, stride=self.stride, padding= self.padding) y = y.view(y.size(0), -1, 4, y.size(2), y.size(3)) if self.bias is not None: y = y + self.bias.view(1, -1, 1, 1, 1) return y def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_roll_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = (2 + x0) % 4 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_roll_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = (1 + x0) % 4 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 16 % 64 x3 = xindex // 1024 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 16, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * (x2 % 16) + 256 * x3), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 32, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (3 + -1 * x1 + 4 * x0 + 16 * ((3 + (-16 + x2) % 4) % 4) + 64 * ((-16 + x2) // 4 % 4) + 256 * x3), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 48, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (15 + -1 * x4 + 16 * tl.where((2 + (-32 + x2) % 4) % 4 < 0, 4 + (2 + (-32 + x2) % 4) % 4, (2 + (-32 + x2) % 4) % 4) + 64 * ((-32 + x2) // 4 % 4) + 256 * x3), tmp14, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 64, tl.int64) tmp19 = tl.load(in_ptr0 + (12 + x1 + -4 * x0 + 16 * tl.where((1 + (-48 + x2) % 4) % 4 < 0, 4 + (1 + (-48 + x2) % 4) % 4, (1 + (-48 + x2) % 4 ) % 4) + 64 * ((-48 + x2) // 4 % 4) + 256 * x3), tmp16, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x5, tmp22, None) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused_roll_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_roll_1[grid(4)](buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(4096)](primals_2, buf2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 16, 4, 4), (256, 16, 4, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 16, 3, 3), (144, 9, 3, 1)) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0) del buf3 triton_poi_fused_add_3[grid(576)](buf4, primals_3, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf4, reinterpret_tensor(primals_1, (4, 16, 4, 4), (256, 16, 4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (16, 16, 4, 4), (256, 16, 4, 1), 0) def _grot90(x, k): return torch.rot90(x.roll(k, 2), k, (3, 4)) class ConvP4New(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, 4, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = padding def _rotated(self, w): ws = [_grot90(w, k).view(w.size(0), -1, w.size(3), w.size(4)) for k in range(4)] return torch.cat(ws, 1).view(4 * w.size(0), 4 * w.size(1), w.size(3 ), w.size(4)) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
claudio-unipv/groupcnn
ConvP4
false
12,231
[ "MIT" ]
0
2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
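A usage sketch for the eager ConvP4 layer above, using the record's shapes; the input is laid out as (batch, in_channels, 4 rotations, H, W) and the output keeps the 4-element rotation axis:

import torch

conv = ConvP4(in_channels=4, out_channels=4, kernel_size=4)  # stride=1, padding=1 by default
x = torch.rand(4, 4, 4, 4, 4)
y = conv(x)
print(y.shape)  # torch.Size([4, 4, 4, 3, 3]): a 4x4 kernel with padding 1 shrinks H, W from 4 to 3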
Attention
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF dot_products.data.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_key': 4, 'dropout_ratio': 0.5, 'causal': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out =buf1) del arg1_1 buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class AttentionNew(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
cristipp/decaNLP
Attention
false
12,232
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
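A minimal forward pass for the eager Attention module above; .eval() disables dropout, and 4-D inputs (as in the record's get_inputs()) keep execution out of the causal branch, which references an INF constant that is not defined in this snippet:

import torch

attn = Attention(d_key=4, dropout_ratio=0.5, causal=False).eval()
q = k = v = torch.rand(4, 4, 4, 4)
out = attn(q, k, v)  # softmax(q @ k.transpose(1, 2) / sqrt(d_key)) @ v
print(out.shape)     # torch.Size([4, 4, 4, 4])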
MultiHead
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF dot_products.data.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False): super().__init__() self.attention = Attention(d_key, dropout_ratio, causal=causal) self.wq = Linear(d_key, d_key, bias=False) self.wk = Linear(d_key, d_key, bias=False) self.wv = Linear(d_value, d_value, bias=False) self.n_heads = n_heads def forward(self, query, key, value, padding=None): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return torch.cat([self.attention(q, k, v, padding=padding) for q, k, v in zip(query, key, value)], -1) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4, 'dropout_ratio': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 
1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12 del buf12 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16 del buf16 triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf14 del buf18 del buf6 return buf19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), 
reinterpret_tensor(primals_5, (16, 4), (4, 1), 0 ), buf5, buf9, buf13, buf17, reinterpret_tensor(buf2, (4, 1, 4), ( 16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3 ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3 ), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2 ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2 ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2 ), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1 ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1 ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1 ), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0) def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF dot_products.data.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHeadNew(nn.Module): def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False): super().__init__() self.attention = Attention(d_key, dropout_ratio, causal=causal) self.wq = Linear(d_key, d_key, bias=False) self.wk = Linear(d_key, d_key, bias=False) self.wv = Linear(d_value, d_value, bias=False) self.n_heads = n_heads def forward(self, input_0, input_1, input_2): primals_2 = self.wq.weight primals_4 = self.wk.weight primals_6 = self.wv.weight primals_1 = input_0 primals_3 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
cristipp/decaNLP
MultiHead
false
12,233
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
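A self-attention call on the eager MultiHead module above, using the record's 3-D shapes; with the default causal=False the undefined-INF branch of Attention is never reached:

import torch

mh = MultiHead(d_key=4, d_value=4, n_heads=4, dropout_ratio=0.5).eval()
x = torch.rand(4, 4, 4)
out = mh(x, x, x)  # 4 heads of width 1, attended separately and concatenated back to width 4
print(out.shape)   # torch.Size([4, 4, 4])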
CaffeNormalize
import torch import torch.utils.data import torch.nn as nn class CaffeNormalize(nn.Module): def __init__(self, features, eps=1e-07): super(CaffeNormalize, self).__init__() self.scale = nn.Parameter(10.0 * torch.ones(features)) self.eps = eps def forward(self, x): x_size = x.size() norm = x.norm(2, dim=1, keepdim=True) x = x.div(norm + self.eps) return x.mul(self.scale.view(1, x_size[1], 1, 1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class CaffeNormalizeNew(nn.Module): def __init__(self, features, eps=1e-07): super(CaffeNormalizeNew, self).__init__() self.scale = nn.Parameter(10.0 * torch.ones(features)) self.eps = eps def forward(self, input_0): primals_2 = self.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
cynthiamao98/DepthAwareCNN
CaffeNormalize
false
12,234
[ "MIT" ]
0
824cffaa4159e3dc7cc251a4a659e35c437bb92c
https://github.com/cynthiamao98/DepthAwareCNN/tree/824cffaa4159e3dc7cc251a4a659e35c437bb92c
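A small check of the eager CaffeNormalize module above: it L2-normalizes along dim 1 and multiplies by a per-channel scale initialized to 10, so at initialization the channel norm of the output is close to 10 everywhere:

import torch

norm = CaffeNormalize(features=4)
x = torch.rand(4, 4, 4, 4)
y = norm(x)
print(y.norm(2, dim=1)[0, 0, 0].item())  # ~10.0 (up to the eps=1e-07 in the denominator)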
TransformerEncoderLayer
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(F, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearReLU(nn.Module): def __init__(self, d_model, d_hidden): super().__init__() self.feedforward = Feedforward(d_model, d_hidden, activation='relu') self.linear = Linear(d_hidden, d_model) def forward(self, x, padding=None): return self.linear(self.feedforward(x)) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF dot_products.data.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False): super().__init__() self.attention = Attention(d_key, dropout_ratio, causal=causal) self.wq = Linear(d_key, d_key, bias=False) self.wk = Linear(d_key, d_key, bias=False) self.wv = Linear(d_value, d_value, bias=False) self.n_heads = n_heads def forward(self, query, key, value, padding=None): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return torch.cat([self.attention(q, k, v, padding=padding) for q, k, v in zip(query, key, value)], -1) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class ResidualBlock(nn.Module): def __init__(self, layer, d_model, dropout_ratio): super().__init__() self.layer = layer self.dropout = nn.Dropout(dropout_ratio) self.layernorm = LayerNorm(d_model) def forward(self, *x, padding=None): return self.layernorm(x[0] + self.dropout(self.layer(*x, padding= padding))) class TransformerEncoderLayer(nn.Module): def __init__(self, dimension, n_heads, hidden, dropout): super().__init__() self.selfattn = ResidualBlock(MultiHead(dimension, dimension, n_heads, dropout), dimension, dropout) self.feedforward = ResidualBlock(LinearReLU(dimension, hidden), dimension, dropout) def forward(self, x, padding=None): return self.feedforward(self.selfattn(x, x, x, padding=padding)) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dimension': 4, 'n_heads': 4, 'hidden': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tmp0 * tmp5 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tmp11 = tmp6 / tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 
1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12 del buf12 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16 del buf16 triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf14 buf20 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0) del buf6 buf21 = buf20 del buf20 buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0) del buf18 triton_poi_fused_add_mean_std_3[grid(16)](buf21, primals_1, buf19, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_5, primals_1, buf19, buf22, buf21, primals_6, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_6 buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf24) buf25 = reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0) del buf24 buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf25, primals_8, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0) del buf26 triton_poi_fused_add_6[grid(64)](buf27, buf23, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_11, buf27, primals_12, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return (buf28, primals_1, primals_5, primals_11, buf5, buf9, buf13, buf17, buf19, reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf27, primals_9, buf29, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3 ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), 
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)) def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(F, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearReLU(nn.Module): def __init__(self, d_model, d_hidden): super().__init__() self.feedforward = Feedforward(d_model, d_hidden, activation='relu') self.linear = Linear(d_hidden, d_model) def forward(self, x, padding=None): return self.linear(self.feedforward(x)) class Attention(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, query, key, value, padding=None): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and self.causal: tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF dot_products.data.sub_(tri.unsqueeze(0)) if padding is not None: dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as( dot_products), -INF) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False): super().__init__() self.attention = Attention(d_key, dropout_ratio, causal=causal) self.wq = Linear(d_key, d_key, bias=False) self.wk = Linear(d_key, d_key, bias=False) self.wv = Linear(d_value, d_value, bias=False) self.n_heads = n_heads def forward(self, query, key, value, padding=None): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return torch.cat([self.attention(q, k, v, padding=padding) for q, k, v in zip(query, key, value)], -1) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class ResidualBlock(nn.Module): def __init__(self, layer, d_model, dropout_ratio): super().__init__() self.layer = layer self.dropout = nn.Dropout(dropout_ratio) self.layernorm = LayerNorm(d_model) def forward(self, *x, padding=None): return self.layernorm(x[0] + self.dropout(self.layer(*x, padding= padding))) class TransformerEncoderLayerNew(nn.Module): def __init__(self, dimension, n_heads, hidden, dropout): super().__init__() self.selfattn = ResidualBlock(MultiHead(dimension, dimension, n_heads, dropout), dimension, dropout) self.feedforward = ResidualBlock(LinearReLU(dimension, hidden), dimension, dropout) def forward(self, input_0): primals_2 = self.selfattn.layer.wq.weight primals_3 = self.selfattn.layer.wk.weight primals_4 = self.selfattn.layer.wv.weight primals_5 = self.selfattn.layernorm.gamma primals_6 = self.selfattn.layernorm.beta primals_7 = self.feedforward.layer.feedforward.linear.weight primals_8 = 
self.feedforward.layer.feedforward.linear.bias primals_9 = self.feedforward.layer.linear.weight primals_10 = self.feedforward.layer.linear.bias primals_11 = self.feedforward.layernorm.gamma primals_12 = self.feedforward.layernorm.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
cristipp/decaNLP
TransformerEncoderLayer
false
12,235
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
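A minimal usage sketch for the row above (not part of the dataset row): the constructor arguments dimension=4, n_heads=4, hidden=4 are only inferred from the 4-wide buffers and the four per-head bmm slices in the generated code, and a CUDA device is assumed because the compiled call() path launches Triton kernels.

import torch

# Hypothetical smoke test; TransformerEncoderLayerNew is the class defined in the row above.
layer = TransformerEncoderLayerNew(dimension=4, n_heads=4, hidden=4, dropout=0.1).cuda()
x = torch.rand(4, 4, 4, device='cuda')  # (batch, sequence, dimension)
out = layer(x)
print(out.shape)  # expected torch.Size([4, 4, 4])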
GradLoss
import torch
import torch.nn as nn


class GradLoss(nn.Module):

    def __init__(self):
        super(GradLoss, self).__init__()

    def forward(self, grad_fake, grad_real):
        return torch.sum(torch.mean(torch.abs(grad_real - grad_fake)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class GradLossNew(nn.Module): def __init__(self): super(GradLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
GradLoss
false
12,236
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
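A quick numerical cross-check one might run on the pair above (a sketch, not part of the row; it assumes a CUDA device, since GradLossNew dispatches to the Triton kernel, and that both classes are in scope):

import torch

fake = torch.rand(4, 4, 4, 4, device='cuda')
real = torch.rand(4, 4, 4, 4, device='cuda')
# Both versions reduce to mean(|real - fake|) over all 256 elements.
ref = GradLoss()(fake, real)
opt = GradLossNew()(fake, real)
assert torch.allclose(ref, opt, atol=1e-6)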
RMSE_log
import torch
import torch.nn as nn
import torch.nn.functional as F


class RMSE_log(nn.Module):

    def __init__(self):
        super(RMSE_log, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.sqrt(torch.mean(torch.abs(torch.log(real) - torch.log(fake)) ** 2))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_log_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = libdevice.sqrt(tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_log_mean_pow_sqrt_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class RMSE_logNew(nn.Module): def __init__(self): super(RMSE_logNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
RMSE_log
false
12,237
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
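One caveat for the row above: both code paths take torch.log of the raw inputs, so the loss is only finite when fake and real are strictly positive (the bundled get_inputs() satisfies this with torch.rand). A guarded call might look like the sketch below; the clamp floor is an arbitrary choice, not something the row prescribes.

import torch

eps = 1e-6  # arbitrary floor to keep log() finite
fake = torch.rand(4, 4, 4, 4, device='cuda').clamp(min=eps)
real = torch.rand(4, 4, 4, 4, device='cuda').clamp(min=eps)
loss = RMSE_logNew()(fake, real)  # sqrt(mean(|log(real) - log(fake)| ** 2))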
RMSE
import torch
import torch.nn as nn
import torch.nn.functional as F


class RMSE(nn.Module):

    def __init__(self):
        super(RMSE, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.sqrt(torch.mean(torch.abs(10.0 * real - 10.0 * fake) ** 2))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 10.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp6 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tmp13 = libdevice.sqrt(tmp12) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_pow_sqrt_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class RMSENew(nn.Module): def __init__(self): super(RMSENew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
RMSE
false
12,238
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
LayerNorm
import torch
import torch.utils.data
import torch.nn as nn


class LayerNorm(nn.Module):

    def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0, learnable=False):
        super(LayerNorm, self).__init__()
        if learnable:
            self.gamma = nn.Parameter(torch.ones(features))
            self.beta = nn.Parameter(torch.zeros(features))
        else:
            self.gamma = gamma
            self.beta = beta
        self.eps = eps

    def forward(self, x):
        x_size = x.size()
        mean = x.view(x_size[0], x_size[1], x_size[2] * x_size[3]).mean(2).view(
            x_size[0], x_size[1], 1, 1).repeat(1, 1, x_size[2], x_size[3])
        std = x.view(x_size[0], x_size[1], x_size[2] * x_size[3]).std(2).view(
            x_size[0], x_size[1], 1, 1).repeat(1, 1, x_size[2], x_size[3])
        return self.gamma * (x - mean) / (std + self.eps) + self.beta


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_repeat_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = tmp0 - tmp20 tmp22 = 1.0 tmp23 = tmp21 * tmp22 tmp24 = 15.0 tmp25 = tmp18 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp23 / tmp28 tmp30 = 0.0 tmp31 = tmp29 + tmp30 tl.store(out_ptr2 + (r1 + 16 * x0), tmp31, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_repeat_std_sub_0[grid(16)](arg0_1, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf4, class LayerNormNew(nn.Module): def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0, learnable= False): super(LayerNormNew, self).__init__() if learnable: self.gamma = nn.Parameter(torch.ones(features)) self.beta = nn.Parameter(torch.zeros(features)) else: self.gamma = gamma self.beta = beta self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
cynthiamao98/DepthAwareCNN
LayerNorm
false
12,239
[ "MIT" ]
0
824cffaa4159e3dc7cc251a4a659e35c437bb92c
https://github.com/cynthiamao98/DepthAwareCNN/tree/824cffaa4159e3dc7cc251a4a659e35c437bb92c
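Note on the pair above: with the default learnable=False, gamma and beta stay plain Python floats (1.0 and 0.0), which is why the fused kernel bakes them in as constants (tmp22 = 1.0, tmp30 = 0.0) and call() takes only the input tensor. A sketch of the equivalence check, assuming a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
# Per-(N, C) normalisation over the spatial positions, using the unbiased std (the / 15.0 in the kernel).
ref = LayerNorm(features=4)(x)
opt = LayerNormNew(features=4)(x)
assert torch.allclose(ref, opt, atol=1e-5)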
L1
import torch
import torch.nn as nn
import torch.nn.functional as F


class L1(nn.Module):

    def __init__(self):
        super(L1, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.mean(torch.abs(10.0 * real - 10.0 * fake))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 10.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1New(nn.Module): def __init__(self): super(L1New, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
L1
false
12,240
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
FocalLoss
import torch
from torch import nn
import torch.nn.functional as F


class FocalLoss(nn.Module):

    def __init__(self, gamma):
        super().__init__()
        self.gamma = gamma

    def forward(self, input, target):
        if not target.size() == input.size():
            raise ValueError(
                'Target size ({}) must be the same as input size ({})'.format(
                    target.size(), input.size()))
        max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
        invprobs = F.logsigmoid(-input * (target * 2 - 1))
        loss = (invprobs * self.gamma).exp() * loss
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'gamma': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = -tmp0 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl_math.abs(tmp7) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = 4.0 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp0 * tmp2 tmp19 = tmp0 - tmp18 tmp20 = triton_helpers.maximum(tmp1, tmp8) tmp21 = tmp19 + tmp20 tmp22 = -tmp20 tmp23 = tl_math.exp(tmp22) tmp24 = tmp1 - tmp20 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tmp27 = tl_math.log(tmp26) tmp28 = tmp21 + tmp27 tmp29 = tmp17 * tmp28 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tmp33 = 256.0 tmp34 = tmp32 / tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp34, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class FocalLossNew(nn.Module): def __init__(self, gamma): super().__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
dainis-boumber/nlp-loss-functions
FocalLoss
false
12,241
[ "Apache-2.0" ]
0
735d1e74bf9b9705a56cbb718b85448575efb5ee
https://github.com/dainis-boumber/nlp-loss-functions/tree/735d1e74bf9b9705a56cbb718b85448575efb5ee
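The unmodulated term in the forward above, input - input * target + max_val + log(exp(-max_val) + exp(-input - max_val)), is the numerically stable form of binary cross-entropy with logits, and exp(invprobs * gamma) is the focal down-weighting factor (1 - p_t) ** gamma. A small sketch of the first identity (pure eager PyTorch, no CUDA needed):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)  # logits
t = torch.rand(4, 4, 4, 4)   # soft targets
max_val = (-x).clamp(min=0)
stable_bce = x - x * t + max_val + ((-max_val).exp() + (-x - max_val).exp()).log()
assert torch.allclose(stable_bce, F.binary_cross_entropy_with_logits(x, t, reduction='none'), atol=1e-6)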
ConvEncoder
import torch
from torch import nn


class ConvEncoder(nn.Module):
    """ Simple convolutional encoder network.

    It consists of 5 convolutional layers, each downsampling the input
    by a factor of 2, and a final fully-connected layer projecting the
    output to c_dim dimensions.

    Args:
        c_dim (int): output dimension of latent embedding
    """

    def __init__(self, c_dim=128):
        super().__init__()
        self.conv0 = nn.Conv2d(3, 32, 3, stride=2)
        self.conv1 = nn.Conv2d(32, 64, 3, stride=2)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2)
        self.conv3 = nn.Conv2d(128, 256, 3, stride=2)
        self.conv4 = nn.Conv2d(256, 512, 3, stride=2)
        self.fc_out = nn.Linear(512, c_dim)
        self.actvn = nn.ReLU()

    def forward(self, x):
        batch_size = x.size(0)
        net = self.conv0(x)
        net = self.conv1(self.actvn(net))
        net = self.conv2(self.actvn(net))
        net = self.conv3(self.actvn(net))
        net = self.conv4(self.actvn(net))
        net = net.view(batch_size, 512, -1).mean(2)
        out = self.fc_out(self.actvn(net))
        return out


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mean_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 / tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(in_out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (128, 512), (512, 1)) assert_size_stride(primals_13, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) triton_poi_fused_1[grid(96, 9)](primals_2, buf1, 96, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_5[grid(131072, 9)](primals_10, buf5, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = extern_kernels.convolution(buf0, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 31, 31), (30752, 1, 992, 32)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_6[grid(123008)](buf7, primals_3, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_3 buf8 = extern_kernels.convolution(buf7, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 15, 15), (14400, 1, 960, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_7[grid(57600)](buf9, primals_5, 57600, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf10 = extern_kernels.convolution(buf9, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 7, 7), (6272, 1, 896, 128)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_8[grid(25088)](buf11, primals_7, 25088, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 3, 3), (2304, 1, 768, 256)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_9[grid(9216)](buf13, primals_9, 9216, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, buf5, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 1, 1), (512, 1, 512, 512)) buf15 = reinterpret_tensor(buf14, (4, 512), (512, 1), 0) del buf14 triton_poi_fused_mean_relu_10[grid(2048)](buf15, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf16 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_13, buf15, reinterpret_tensor( primals_12, (512, 128), (1, 512), 0), alpha=1, beta=1, out=buf16) del primals_13 return (buf16, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15, primals_12) class ConvEncoderNew(nn.Module): """ Simple convolutional encoder network. It consists of 5 convolutional layers, each downsampling the input by a factor of 2, and a final fully-connected layer projecting the output to c_dim dimenions. 
Args: c_dim (int): output dimension of latent embedding """ def __init__(self, c_dim=128): super().__init__() self.conv0 = nn.Conv2d(3, 32, 3, stride=2) self.conv1 = nn.Conv2d(32, 64, 3, stride=2) self.conv2 = nn.Conv2d(64, 128, 3, stride=2) self.conv3 = nn.Conv2d(128, 256, 3, stride=2) self.conv4 = nn.Conv2d(256, 512, 3, stride=2) self.fc_out = nn.Linear(512, c_dim) self.actvn = nn.ReLU() def forward(self, input_0): primals_2 = self.conv0.weight primals_3 = self.conv0.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_10 = self.conv4.weight primals_11 = self.conv4.bias primals_12 = self.fc_out.weight primals_13 = self.fc_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
crysoberil/ObjectReconstruction_ONetBased
ConvEncoder
false
12,242
[ "MIT" ]
0
7c15ea8a64ee3647c86b57b16f0c85bd51ccdd47
https://github.com/crysoberil/ObjectReconstruction_ONetBased/tree/7c15ea8a64ee3647c86b57b16f0c85bd51ccdd47
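For reference, the five stride-2, kernel-3, unpadded convolutions shrink the 64x64 test input to 31 -> 15 -> 7 -> 3 -> 1, which is exactly what the assert_size_stride calls on buf6 through buf14 in the generated code encode. The arithmetic, as a sketch:

# Output size of an unpadded convolution: (h - k) // s + 1
h = 64
for _ in range(5):
    h = (h - 3) // 2 + 1
    print(h)  # 31, 15, 7, 3, 1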
L1_log
import torch
import torch.nn as nn
import torch.nn.functional as F


class L1_log(nn.Module):

    def __init__(self):
        super(L1_log, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.mean(torch.abs(torch.log(real) - torch.log(fake)))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = 256.0 tmp10 = tmp8 / tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_log_mean_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1_logNew(nn.Module): def __init__(self): super(L1_logNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
L1_log
false
12,243
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
NormalLoss
import torch
import torch.nn as nn


class NormalLoss(nn.Module):

    def __init__(self):
        super(NormalLoss, self).__init__()

    def forward(self, grad_fake, grad_real):
        prod = (grad_fake[:, :, None, :] @ grad_real[:, :, :, None]).squeeze(-1).squeeze(-1)
        fake_norm = torch.sqrt(torch.sum(grad_fake ** 2, dim=-1))
        real_norm = torch.sqrt(torch.sum(grad_real ** 2, dim=-1))
        return 1 - torch.mean(prod / (fake_norm * real_norm))


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp14 = tmp13 * tmp13 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp12 * tmp24 tmp26 = tmp0 / tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 16.0 tmp31 = tmp29 / tmp30 tmp32 = 1.0 tmp33 = tmp32 - tmp31 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 1), (4, 1, 1), 0), out=buf0) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0[grid(1)](buf1, buf3, arg0_1, arg1_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 return buf3, class NormalLossNew(nn.Module): def __init__(self): super(NormalLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
NormalLoss
false
12,244
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
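The quantity prod / (fake_norm * real_norm) above is the cosine similarity along the last dimension, so the loss is one minus its mean. A sketch of that reading (F.cosine_similarity adds a small eps in its denominator, hence the tolerance):

import torch
import torch.nn.functional as F

a = torch.rand(4, 4, 4)
b = torch.rand(4, 4, 4)
ref = NormalLoss()(a, b)
alt = 1 - F.cosine_similarity(a, b, dim=-1).mean()
assert torch.allclose(ref, alt, atol=1e-5)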
ScaledDotProductAttention
import torch


def masked_softmax(x, m=None, dim=-1):
    """
    Softmax with mask (optional)
    """
    x = torch.clamp(x, min=-15.0, max=15.0)
    if m is not None:
        m = m.float()
        x = x * m
    e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
    if m is not None:
        e_x = e_x * m
    softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
    return softmax


class ScaledDotProductAttention(torch.nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, q, k, v, mask):
        attn = torch.bmm(q, k.transpose(1, 2))
        attn = attn / self.temperature
        attn = masked_softmax(attn, mask, 2)
        attn = self.dropout(attn)
        output = torch.bmm(attn, v)
        return output, attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = -15.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp10, tmp3) tmp12 = triton_helpers.minimum(tmp11, tmp5) tmp14 = tmp12 * tmp13 tmp15 = triton_helpers.maximum(tmp8, tmp14) tmp17 = tmp16 * tmp1 tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp21 = tmp19 * tmp20 tmp22 = triton_helpers.maximum(tmp15, tmp21) tmp24 = tmp23 * tmp1 tmp25 = triton_helpers.maximum(tmp24, tmp3) tmp26 = triton_helpers.minimum(tmp25, tmp5) tmp28 = tmp26 * tmp27 tmp29 = triton_helpers.maximum(tmp22, tmp28) tmp30 = tmp8 - tmp29 tmp31 = tl_math.exp(tmp30) tmp32 = tmp31 * tmp7 tmp33 = tmp14 - tmp29 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 * tmp13 tmp36 = tmp32 + tmp35 tmp37 = tmp21 - tmp29 tmp38 = tl_math.exp(tmp37) tmp39 = tmp38 * tmp20 tmp40 = tmp36 + tmp39 tmp41 = tmp28 - tmp29 tmp42 = tl_math.exp(tmp41) tmp43 = tmp42 * tmp27 tmp44 = tmp40 + tmp43 tl.store(out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr1 + x0, tmp44, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_mul_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp7 = tl.load(in_ptr0 + x2, xmask) tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = -15.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 * tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 * tmp7 tmp14 = 1e-06 tmp15 = tmp13 + tmp14 tmp16 = tmp12 / tmp15 tl.store(in_out_ptr0 + x2, tmp16, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 
4, 1)) assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0[grid(16)](buf0, arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 triton_poi_fused_add_clamp_div_exp_max_mul_sub_1[grid(64)](buf3, arg2_1, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg2_1 del buf1 del buf2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, arg3_1, out=buf4) del arg3_1 return buf4, buf3 def masked_softmax(x, m=None, dim=-1): """ Softmax with mask (optional) """ x = torch.clamp(x, min=-15.0, max=15.0) if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06) return softmax class ScaledDotProductAttentionNew(torch.nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, dropout=0.1): super().__init__() self.temperature = temperature self.dropout = torch.nn.Dropout(dropout) def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
daiki-kimura/commonsense-rl
ScaledDotProductAttention
false
12,245
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
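Worth noting for the row above: masked_softmax applies the mask multiplicatively before and after the exponential rather than filling masked scores with -inf, so masked positions get exactly zero weight and the kept weights still sum to roughly one (up to the 1e-06 term in the denominator). A tiny CPU sketch:

import torch

scores = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
mask = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
w = masked_softmax(scores, mask, dim=-1)
print(w)          # weight only on the first two positions
print(w.sum(-1))  # ~1.0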
SoftWingLoss
import math import torch import torch.nn as nn class SoftWingLoss(nn.Module): """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face Alignment' Lin et al. TIP'2021. loss = 1. |x| , if |x| < omega1 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 Args: omega1 (float): The first threshold. omega2 (float): The second threshold. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega1 = omega1 self.omega2 = omega2 self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / self.epsilon) def criterion(self, pred, target): """Criterion of wingloss. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega1, delta, self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, output, target, target_weight=None): """Forward function. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 2.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tl_math.log(tmp8) tmp10 = 20.0 tmp11 = tmp9 * tmp10 tmp12 = -30.188758248682007 tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp5, tmp3, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)]( arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 return buf1, class SoftWingLossNew(nn.Module): """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face Alignment' Lin et al. TIP'2021. loss = 1. |x| , if |x| < omega1 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 Args: omega1 (float): The first threshold. omega2 (float): The second threshold. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega1 = omega1 self.omega2 = omega2 self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / self.epsilon) def criterion(self, pred, target): """Criterion of wingloss. 
Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega1, delta, self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chenxinfeng4/mmpose
SoftWingLoss
false
12,246
[ "Apache-2.0" ]
0
b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
https://github.com/chenxinfeng4/mmpose/tree/b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
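The constant tmp12 = -30.188758248682007 hard-coded in the fused kernel is just the module's precomputed B = omega1 - omega2 * log(1 + omega1 / epsilon) with the default omega1=2.0, omega2=20.0, epsilon=0.5. A one-line check:

import math

print(2.0 - 20.0 * math.log(1.0 + 2.0 / 0.5))  # about -30.1887582487, the value baked into the kernel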
CNN
import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN(nn.Module):
    """
    Convolutional Neural Network.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1)
        self.fc1 = nn.Linear(8 * 8 * 20, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 3, 3)
        x = x.view(-1, 8 * 8 * 20)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=-1)


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 20 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 % 20 x2 = xindex // 400 x5 = xindex x4 = xindex // 8000 x6 = xindex % 8000 tmp0 = tl.load(in_ptr0 + (3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (60 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (61 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (62 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (120 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (121 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (122 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x5, tmp16, xmask) tl.store(out_ptr1 + (x6 + 8064 
* x4), tmp41, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 1280), (1280, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (10, 64), (64, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 20, 60, 60), (72320, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(288000)](buf0, primals_2, buf1, 288000, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 20, 20, 20), (8000, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 20, 20), (8064, 400, 20, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(32000)](buf1, buf2, buf3, 32000, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((25, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0 ), reinterpret_tensor(primals_4, (1280, 64), (1, 1280), 0), out =buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(1600)](buf5, primals_5, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((25, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_7 buf9 = empty_strided_cuda((25, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_3[grid(25)](buf6, buf9, 25, 10, XBLOCK=8, num_warps=2, num_stages=1) del buf6 return buf9, primals_1, primals_3, buf1, buf3, reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), buf5, buf9, primals_6, primals_4 class CNNNew(nn.Module): """ Convolutional Neural Network. 
""" def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
danielrjiang/Ax
CNN
false
12,247
[ "MIT" ]
0
43014b28683b3037b5c7307869cb9b75ca31ffb6
https://github.com/danielrjiang/Ax/tree/43014b28683b3037b5c7307869cb9b75ca31ffb6
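A minimal smoke test for the compiled module in the entry above (a sketch only, not part of the dataset record): it assumes the CNNNew definition and its imports from this entry are in scope and that a CUDA device is available; the (4, 1, 64, 64) input shape is the one hard-coded into the entry's call() graph.

import torch

model = CNNNew().cuda()                      # parameters on the GPU; forward dispatches to call()
x = torch.rand(4, 1, 64, 64, device='cuda')  # shape fixed by the assert_size_stride guards above
with torch.no_grad():
    log_probs = model(x)
print(log_probs.shape)              # log-softmax scores produced by the fused final kernel
print(log_probs.exp().sum(dim=-1))  # each row should sum to ~1.0, since the output is a log-softmax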
Attention
import torch import torch.nn as nn class Attention(nn.Module): """ Applies attention mechanism on the `context` using the `query`. **Thank you** to IBM for their initial implementation of :class:`Attention`. Here is their `License <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__. Args: dimensions (int): Dimensionality of the query and context. attention_type (str, optional): How to compute the attention score: * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` Example: >>> attention = Attention(256) >>> query = torch.randn(5, 1, 256) >>> context = torch.randn(5, 5, 256) >>> output, weights = attention(query, context) >>> output.size() torch.Size([5, 1, 256]) >>> weights.size() torch.Size([5, 1, 5]) """ def __init__(self, query_dim, context_dim, attention_type='general'): super(Attention, self).__init__() if attention_type not in ['dot', 'general']: raise ValueError('Invalid attention type selected.') self.attention_type = attention_type if self.attention_type == 'general': self.linear_in = nn.Linear(query_dim, query_dim, bias=False) if query_dim != context_dim: self.linear_proj = nn.Linear(query_dim, context_dim, bias=False) self.linear_out = nn.Linear(context_dim * 2, context_dim, bias=False) self.softmax = nn.Softmax(dim=-1) self.tanh = nn.Tanh() def forward(self, query, context): """ Args: query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of queries to query the context. context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data overwhich to apply the attention mechanism. Returns: :class:`tuple` with `output` and `weights`: * **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]): Tensor containing the attended features. * **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]): Tensor containing attention weights. """ batch_size, output_len, query_dim = query.size() batch_size, query_len, context_dim = context.size() if self.attention_type == 'general': query = query.reshape(batch_size * output_len, query_dim) query = self.linear_in(query) query = query.reshape(batch_size, output_len, query_dim) if query_dim != context_dim: query = self.linear_proj(query) attention_scores = torch.bmm(query, context.transpose(1, 2). contiguous()) attention_scores = attention_scores.view(batch_size * output_len, query_len) attention_weights = self.softmax(attention_scores) attention_weights = attention_weights.view(batch_size, output_len, query_len) mix = torch.bmm(attention_weights, context) combined = torch.cat((mix, query), dim=2) combined = combined.view(batch_size * output_len, 2 * context_dim) output = self.linear_out(combined).view(batch_size, output_len, context_dim) output = self.tanh(output) attention_weights = attention_weights.mean(dim=1) return output, attention_weights def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'query_dim': 4, 'context_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 16 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf10 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 4)](primals_2, buf1, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf1, out=buf2) buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf5) buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](buf5, buf0, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0) del buf5 extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0) del buf7 triton_poi_fused_tanh_4[grid(64)](buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mean_5[grid(16)](buf4, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) del 
buf4 return buf8, buf9, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0 ), buf2, reinterpret_tensor(buf6, (16, 8), (8, 1), 0 ), buf8, primals_4, buf10 class AttentionNew(nn.Module): """ Applies attention mechanism on the `context` using the `query`. **Thank you** to IBM for their initial implementation of :class:`Attention`. Here is their `License <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__. Args: dimensions (int): Dimensionality of the query and context. attention_type (str, optional): How to compute the attention score: * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` Example: >>> attention = Attention(256) >>> query = torch.randn(5, 1, 256) >>> context = torch.randn(5, 5, 256) >>> output, weights = attention(query, context) >>> output.size() torch.Size([5, 1, 256]) >>> weights.size() torch.Size([5, 1, 5]) """ def __init__(self, query_dim, context_dim, attention_type='general'): super(AttentionNew, self).__init__() if attention_type not in ['dot', 'general']: raise ValueError('Invalid attention type selected.') self.attention_type = attention_type if self.attention_type == 'general': self.linear_in = nn.Linear(query_dim, query_dim, bias=False) if query_dim != context_dim: self.linear_proj = nn.Linear(query_dim, context_dim, bias=False) self.linear_out = nn.Linear(context_dim * 2, context_dim, bias=False) self.softmax = nn.Softmax(dim=-1) self.tanh = nn.Tanh() def forward(self, input_0, input_1): primals_3 = self.linear_in.weight primals_4 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
daiki-kimura/commonsense-rl
Attention
false
12,248
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
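A small parity check between the eager Attention module and the Triton-compiled AttentionNew in this entry (a sketch under the assumptions that both class definitions above are in scope and a CUDA device is available; the 4x4x4 shapes come from the entry's get_inputs()).

import torch

torch.manual_seed(0)
ref = Attention(query_dim=4, context_dim=4)   # eager 'general' attention
opt = AttentionNew(query_dim=4, context_dim=4)
opt.load_state_dict(ref.state_dict())         # share linear_in / linear_out weights
ref, opt = ref.cuda(), opt.cuda()

query = torch.rand(4, 4, 4, device='cuda')
context = torch.rand(4, 4, 4, device='cuda')
out_ref, w_ref = ref(query, context)
out_opt, w_opt = opt(query, context)
print(torch.allclose(out_ref, out_opt, atol=1e-4),
      torch.allclose(w_ref, w_opt, atol=1e-4))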
Critic
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=1) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1616 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 404 x1 = xindex // 404 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 400, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (400 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 404, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-400 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (300, 404), (404, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 404), (404, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1616)](buf0, primals_2, primals_4, buf1, 1616, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (404, 300), ( 1, 404), 
0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(1200)](buf3, primals_6, 1200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 400), (400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(1600)](buf0, primals_2, buf6, 1600, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
david-varela/collaboration_and_competition
Critic
false
12,250
[ "MIT" ]
0
a170cc02eb3917af19d6aafa8b37f6089b83c35f
https://github.com/david-varela/collaboration_and_competition/tree/a170cc02eb3917af19d6aafa8b37f6089b83c35f
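A weight-for-weight comparison of the eager Critic and the compiled CriticNew above (a sketch assuming both definitions from this entry are in scope and a CUDA device is available; the (4, 4) state/action shapes follow get_inputs()).

import torch

ref = Critic(state_size=4, action_size=4, seed=4)    # eager reference
opt = CriticNew(state_size=4, action_size=4, seed=4)
opt.load_state_dict(ref.state_dict())                # identical fcs1 / fc2 / fc3 weights
ref, opt = ref.cuda(), opt.cuda()

state = torch.rand(4, 4, device='cuda')
action = torch.rand(4, 4, device='cuda')
q_ref = ref(state, action)
q_opt = opt(state, action)
print(q_ref.shape, torch.allclose(q_ref, q_opt, atol=1e-4))  # (4, 1) Q-values from both paths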
CuboidPoseHead
import torch import torch.nn as nn from torchvision.transforms import functional as F import torch.nn.functional as F class CuboidPoseHead(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to `paper. <https://arxiv.org/abs/2004.06239>` for more details. Args: beta: Constant to adjust the magnification of soft-maxed heatmap. """ super(CuboidPoseHead, self).__init__() self.beta = beta self.loss = nn.L1Loss() def forward(self, heatmap_volumes, grid_coordinates): """ Args: heatmap_volumes (torch.Tensor(NxKxLxWxH)): 3D human pose heatmaps predicted by the network. grid_coordinates (torch.Tensor(Nx(LxWxH)x3)): Coordinates of the grids in the heatmap volumes. Returns: human_poses (torch.Tensor(NxKx3)): Coordinates of human poses. """ batch_size = heatmap_volumes.size(0) channel = heatmap_volumes.size(1) x = heatmap_volumes.reshape(batch_size, channel, -1, 1) x = F.softmax(self.beta * x, dim=2) grid_coordinates = grid_coordinates.unsqueeze(1) x = torch.mul(x, grid_coordinates) human_poses = torch.sum(x, dim=2) return human_poses def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'beta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class CuboidPoseHeadNew(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to `paper. <https://arxiv.org/abs/2004.06239>` for more details. Args: beta: Constant to adjust the magnification of soft-maxed heatmap. """ super(CuboidPoseHeadNew, self).__init__() self.beta = beta self.loss = nn.L1Loss() def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chenxinfeng4/mmpose
CuboidPoseHead
false
12,252
[ "Apache-2.0" ]
0
b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
https://github.com/chenxinfeng4/mmpose/tree/b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
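Both heads in this entry are parameter-free, so the eager and compiled paths can be compared directly (a sketch assuming the definitions above are in scope and a CUDA device is available; note the fused kernel is specialized to the toy (4, 4) shapes from get_inputs(), not to the documented NxKxLxWxH volumes).

import torch

head_ref = CuboidPoseHead(beta=4)     # eager soft-argmax style regression
head_opt = CuboidPoseHeadNew(beta=4)  # fused softmax-mul-sum Triton kernel

heatmaps = torch.rand(4, 4, device='cuda')
grids = torch.rand(4, 4, device='cuda')
poses_ref = head_ref(heatmaps, grids)
poses_opt = head_opt(heatmaps, grids)
print(poses_ref.shape, torch.allclose(poses_ref, poses_opt, atol=1e-5))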
Conv1D
import torch import torch.nn as nn from collections import OrderedDict class Conv1D(nn.Module): def __init__(self, embedding_dim, hidden_dim): super(Conv1D, self).__init__() self.convs = nn.Sequential(OrderedDict([('conv1', nn.Conv1d( embedding_dim, hidden_dim, kernel_size=3, stride=1, padding=2)), ('relu1', nn.ReLU()), ('conv2', nn.Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('relu2', nn.ReLU()), ('conv3', nn. Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('tanh', nn.Tanh())])) def forward(self, embedding): return self.convs(embedding.transpose(-2, -1)).transpose(-2, -1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'embedding_dim': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 6 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(4, 4)](primals_1, buf0, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, 
stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 6), (24, 6, 1)) del buf0 buf2 = reinterpret_tensor(buf1, (4, 6), (6, 1), 0) del buf1 buf9 = empty_strided_cuda((4, 6), (6, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(24)](buf2, primals_3, buf9, 24, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 6 ), (0, 6, 1), 0), primals_4, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 8), (32, 8, 1)) buf4 = reinterpret_tensor(buf3, (4, 8), (8, 1), 0) del buf3 buf8 = empty_strided_cuda((4, 8), (8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(32)](buf4, primals_5, buf8, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 8 ), (0, 8, 1), 0), primals_6, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (1, 4, 10), (40, 10, 1)) buf6 = reinterpret_tensor(buf5, (4, 10), (10, 1), 0) del buf5 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_3[grid(40)](buf6, primals_7, buf7, 40, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return reinterpret_tensor(buf6, (10, 4), (1, 10), 0 ), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, ( 1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf2, (1, 4, 6), (24, 6, 1), 0), reinterpret_tensor(buf4, (1, 4, 8), (32, 8, 1), 0 ), buf7, buf8, buf9 class Conv1DNew(nn.Module): def __init__(self, embedding_dim, hidden_dim): super(Conv1DNew, self).__init__() self.convs = nn.Sequential(OrderedDict([('conv1', nn.Conv1d( embedding_dim, hidden_dim, kernel_size=3, stride=1, padding=2)), ('relu1', nn.ReLU()), ('conv2', nn.Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('relu2', nn.ReLU()), ('conv3', nn. Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('tanh', nn.Tanh())])) def forward(self, input_0): primals_2 = self.convs.conv1.weight primals_3 = self.convs.conv1.bias primals_4 = self.convs.conv2.weight primals_5 = self.convs.conv2.bias primals_6 = self.convs.conv3.weight primals_7 = self.convs.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
danielTLevy/PPO-PyTorch
Conv1D
false
12,253
[ "MIT" ]
0
e9f5a34d3cf40135dfdb0ddb082c20f5035e23f7
https://github.com/danielTLevy/PPO-PyTorch/tree/e9f5a34d3cf40135dfdb0ddb082c20f5035e23f7
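A shape-level smoke test for the compiled Conv1DNew above (a sketch assuming the definition from this entry is in scope and a CUDA device is available; the (4, 4) input is the toy shape from get_inputs()).

import torch

conv = Conv1DNew(embedding_dim=4, hidden_dim=4).cuda()
emb = torch.rand(4, 4, device='cuda')  # toy (length, embedding_dim) input
with torch.no_grad():
    out = conv(emb)
print(out.shape)  # each Conv1d (kernel 3, padding 2) lengthens the sequence by 2: 4 -> 6 -> 8 -> 10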
StyledConv
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size 
self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConv(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() self.activate = FusedLeakyReLU(out_channel) def forward(self, input, style, noise=None): out = self.conv(input, style) out = self.noise(out, noise=noise) out = self.activate(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 25 x2 = xindex // 100 x1 = xindex // 25 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = 1.4142135623730951 tmp14 = tmp12 * tmp13 tl.store(out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32) buf8 = torch.ops.aten.normal_functional.default(buf7) del buf7 buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32 ) triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6, buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_6 del primals_7 return buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10 def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, 
pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConvNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() self.activate = FusedLeakyReLU(out_channel) def forward(self, input_0, input_1): primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_6 = self.noise.weight primals_7 = self.activate.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
davidetalon/StyleCLIP
StyledConv
false
12,254
[ "MIT" ]
0
1cbf552b322cd90c417f26a259143382e2b7af8f
https://github.com/davidetalon/StyleCLIP/tree/1cbf552b322cd90c417f26a259143382e2b7af8f
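Because NoiseInjection draws fresh Gaussian noise on every call, the eager and compiled paths are not bitwise comparable without passing a fixed noise tensor, so this is only a shape smoke test for StyledConvNew (a sketch assuming the definitions above are in scope and a CUDA device is available; shapes come from get_inputs()).

import torch

torch.manual_seed(0)
conv = StyledConvNew(in_channel=4, out_channel=4, kernel_size=4, style_dim=4).cuda()
feat = torch.rand(4, 4, 4, 4, device='cuda')  # (batch, channel, H, W) feature map
style = torch.rand(4, 4, device='cuda')       # per-sample style vectors
out = conv(feat, style)
print(out.shape)  # modulated conv with kernel 4 / padding 2, then noise injection + fused leaky ReLU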
NegativeScaledDotProduct
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_neg_sqrt_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_neg_sqrt_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class NegativeScaledDotProductNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chen-yuxuan/flair
NegativeScaledDotProduct
false
12,255
[ "MIT" ]
0
480d2c9afd66ab8d3bf40a676917e84dba3c4cee
https://github.com/chen-yuxuan/flair/tree/480d2c9afd66ab8d3bf40a676917e84dba3c4cee
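The module is stateless, so the eager and compiled versions in this entry can be checked against each other directly (a sketch assuming the definitions above are in scope and a CUDA device is available).

import torch

ref = NegativeScaledDotProduct()
opt = NegativeScaledDotProductNew()

a = torch.rand(4, 4, device='cuda')
b = torch.rand(4, 4, device='cuda')
# Both compute -(a @ b.T) / sqrt(d); the Triton kernel folds the 1/sqrt(4) into a *0.5.
print(torch.allclose(ref(a, b), opt(a, b), atol=1e-6))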
GAT
import torch import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.matmul(input, self.W) N = h.size()[1] batch_size = h.size(0) a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self. out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + 16 * ((4 * x1 + 64 * x2 + x0) // 64 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1) tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1) tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1) tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp41 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, 
eviction_policy='evict_last' ).to(tl.int1) tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last') tmp74 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp75 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last') tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last') tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last') tmp108 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp109 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last') tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last') tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last') tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp42 = tmp41 * tmp3 tmp43 = tl.where(tmp40, tmp41, tmp42) tmp44 = tl.where(tmp0, tmp43, tmp6) tmp47 = tmp46 * tmp3 tmp48 = tl.where(tmp45, tmp46, tmp47) tmp49 = tl.where(tmp8, tmp48, tmp6) tmp50 = triton_helpers.maximum(tmp44, tmp49) tmp53 = tmp52 * tmp3 tmp54 = tl.where(tmp51, tmp52, tmp53) tmp55 = tl.where(tmp15, tmp54, tmp6) tmp56 = triton_helpers.maximum(tmp50, tmp55) tmp59 = tmp58 * tmp3 tmp60 = tl.where(tmp57, tmp58, tmp59) tmp61 = tl.where(tmp22, tmp60, tmp6) tmp62 = triton_helpers.maximum(tmp56, tmp61) tmp63 = tmp44 - tmp62 tmp64 = tl_math.exp(tmp63) tmp65 = tmp49 - tmp62 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tmp55 - tmp62 tmp69 = tl_math.exp(tmp68) tmp70 = tmp67 + tmp69 tmp71 = tmp61 - tmp62 tmp72 = tl_math.exp(tmp71) tmp73 = tmp70 + tmp72 tmp76 = tmp75 * tmp3 tmp77 = tl.where(tmp74, tmp75, tmp76) tmp78 = tl.where(tmp0, tmp77, tmp6) tmp81 = tmp80 * tmp3 tmp82 = tl.where(tmp79, tmp80, tmp81) tmp83 = tl.where(tmp8, tmp82, tmp6) tmp84 = triton_helpers.maximum(tmp78, tmp83) tmp87 = tmp86 * tmp3 tmp88 
= tl.where(tmp85, tmp86, tmp87) tmp89 = tl.where(tmp15, tmp88, tmp6) tmp90 = triton_helpers.maximum(tmp84, tmp89) tmp93 = tmp92 * tmp3 tmp94 = tl.where(tmp91, tmp92, tmp93) tmp95 = tl.where(tmp22, tmp94, tmp6) tmp96 = triton_helpers.maximum(tmp90, tmp95) tmp97 = tmp78 - tmp96 tmp98 = tl_math.exp(tmp97) tmp99 = tmp83 - tmp96 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp89 - tmp96 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp95 - tmp96 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp110 = tmp109 * tmp3 tmp111 = tl.where(tmp108, tmp109, tmp110) tmp112 = tl.where(tmp0, tmp111, tmp6) tmp115 = tmp114 * tmp3 tmp116 = tl.where(tmp113, tmp114, tmp115) tmp117 = tl.where(tmp8, tmp116, tmp6) tmp118 = triton_helpers.maximum(tmp112, tmp117) tmp121 = tmp120 * tmp3 tmp122 = tl.where(tmp119, tmp120, tmp121) tmp123 = tl.where(tmp15, tmp122, tmp6) tmp124 = triton_helpers.maximum(tmp118, tmp123) tmp127 = tmp126 * tmp3 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tl.where(tmp22, tmp128, tmp6) tmp130 = triton_helpers.maximum(tmp124, tmp129) tmp131 = tmp112 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tmp117 - tmp130 tmp134 = tl_math.exp(tmp133) tmp135 = tmp132 + tmp134 tmp136 = tmp123 - tmp130 tmp137 = tl_math.exp(tmp136) tmp138 = tmp135 + tmp137 tmp139 = tmp129 - tmp130 tmp140 = tl_math.exp(tmp139) tmp141 = tmp138 + tmp140 tl.store(out_ptr0 + x2, tmp28, xmask) tl.store(out_ptr1 + x2, tmp39, xmask) tl.store(out_ptr2 + x2, tmp62, xmask) tl.store(out_ptr3 + x2, tmp73, xmask) tl.store(out_ptr4 + x2, tmp96, xmask) tl.store(out_ptr5 + x2, tmp107, xmask) tl.store(out_ptr6 + x2, tmp130, xmask) tl.store(out_ptr7 + x2, tmp141, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr5 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp14 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr7 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr8 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr9 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp24 = tl.load(in_ptr10 + x4, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr11 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr12 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr13 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp34 = tl.load(in_ptr14 + x4, xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr15 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp41 = tl.load(in_ptr16 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = 
tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp15 = tmp14 * tmp3 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tl.where(tmp0, tmp16, tmp6) tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp0, tmp26, tmp6) tmp29 = tmp27 - tmp28 tmp30 = tl_math.exp(tmp29) tmp32 = tmp30 / tmp31 tmp35 = tmp34 * tmp3 tmp36 = tl.where(tmp33, tmp34, tmp35) tmp37 = tl.where(tmp0, tmp36, tmp6) tmp39 = tmp37 - tmp38 tmp40 = tl_math.exp(tmp39) tmp42 = tmp40 / tmp41 tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp22, xmask) tl.store(out_ptr2 + x3, tmp32, xmask) tl.store(out_ptr3 + x3, tmp42, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp18 & xmask, other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp30 & xmask, other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp39 & xmask, other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x3, tmp52, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) 
assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_gt_2[grid(256)](primals_4, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10) del primals_5 buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf10, buf11, 512, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0), primals_6, out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf19) del primals_7 buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf19, buf20, 512, XBLOCK=256, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf20, (64, 8), (8, 1), 0), primals_8, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf21, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf28) del primals_9 buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf28, buf29, 512, XBLOCK=256, num_warps=4, num_stages=1) buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0), primals_10, out=buf30) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) 
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(64)](buf4, buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5, buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(256)](buf4, buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22, buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16, buf25, buf34, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del buf14 del buf15 del buf2 del buf21 del buf23 del buf24 del buf30 del buf32 del buf33 del buf5 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf0, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf10, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18 ) buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf19, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf19 buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27 ) buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf28, buf35, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf28 buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36 ) buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_6[grid(1024)](buf9, buf18, buf27, buf36, buf37, 1024, XBLOCK=128, num_warps=4, num_stages=1) return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22, buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor( primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor( primals_3, (1, 8), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.matmul(input, self.W) N = h.size()[1] batch_size = h.size(0) a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self. out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, input_0, input_1): primals_2 = self.attention_0.W primals_3 = self.attention_0.a primals_5 = self.attention_1.W primals_6 = self.attention_1.a primals_7 = self.attention_2.W primals_8 = self.attention_2.a primals_9 = self.attention_3.W primals_10 = self.attention_3.a primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
daiki-kimura/commonsense-rl
GAT
false
12256
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
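A minimal usage sketch for the GAT record above, assuming only the eager GraphAttentionLayer/GAT definitions from the original code (not the compiled call() path, which needs CUDA) and the shapes from get_inputs()/get_init_inputs(); the printed output shape matches the (4, 16, 4, 4) buf37 returned by the optimised code.

import torch

# Assumes the GAT class defined in this record is available in the session.
model = GAT(nfeat=4, nhid=4, dropout=0.5, alpha=4, nheads=4)
model.eval()  # disable dropout so the forward pass is deterministic

x = torch.rand(4, 4, 4)       # matches get_inputs()[0]
adj = torch.rand(4, 4, 4, 4)  # matches get_inputs()[1]; entries are almost surely > 0, so the -9e15 mask rarely triggers

with torch.no_grad():
    out = model(x, adj)

# adj broadcasts e from (4, 4, 4) to (4, 4, 4, 4); each head therefore returns an
# ELU-activated (4, 4, 4, 4) tensor, and the 4 heads are concatenated along dim=1.
print(out.shape)  # torch.Size([4, 16, 4, 4])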
FactorizationMachine
from torch.nn import Module import math import torch import numpy as np from torch.nn import * from torch.optim import AdamW from typing import Union class FactorizationMachine(Module): """ [Factorization Machine Recommendation Model] Learns latent space features to characterize similarity of dataset features to compute a recommendation as a function of dataset features. Dataset features can be mixed / hybrid such that you can combine information on both the recommended object and the recommendation target to generate an informed similarity or recommendation / ranking metric. """ def __init__(self, data_dim, hidden_dim=25, seed=None) ->None: """ Instantiate class attributes for FM. Constructs a feature similarity matrix F of shape (x_features, hidden_dim) to learn implicit representations of all trainable features in the data for recommendation or ranking. :param data_dim <int>: Number of features to learn from in the dataset. :param hidden_dim <int>: Dimension of the latent space of features. :param seed <int>: Random seed fixture for reproducibility. """ super().__init__() self.input_dim = data_dim self.hidden_dim = hidden_dim self.torch_gen = None if seed is not None: self.torch_gen = torch.manual_seed(seed) """ Matrix Factorization """ self.F = Parameter(torch.empty((self.input_dim, self.hidden_dim)), requires_grad=True) init.xavier_uniform_(self.F) """ Linear Regression """ self.V = Parameter(torch.empty((self.input_dim, 1)), requires_grad=True ) init.xavier_uniform_(self.V) self.bias = Parameter(torch.zeros(1), requires_grad=True) """ Gaussian Regression """ self.gaussian_dist = Linear(in_features=self.hidden_dim, out_features=2 ) def forward(self, x: 'torch.Tensor'): """ Compute FactorizationMachine(x). Returns a mean and standard deviation for the recommendation. :param x <torch.Tensor>: Factorization machine input Tensor of shape (N, input_dim). """ sq_sm = torch.matmul(x, self.F) ** 2 sm_sq = torch.matmul(x ** 2, self.F ** 2) lin_reg = torch.matmul(x, self.V) latent = self.bias + lin_reg + 0.5 * sq_sm - sm_sq output = self.gaussian_dist(latent) return output[:, 0], torch.abs(output[:, 1]) def fit(self, X: 'Union[torch.Tensor, np.ndarray]', Y: 'Union[torch.Tensor, np.ndarray]', mask: 'Union[torch.Tensor, np.ndarray]'=None, cycles=100, lr=0.002, batch_frac=0.01, regularize=0.01, patience=3, verbose=False): """ Train the Factorization Machine. :param X <torch.Tensor>: Input training data features of shape (N, X). :param Y <torch.Tensor>: Target training data class / score vector of shape (N, 1). :param mask <torch.Tensor>: Feature observability mask for X of shape (N, X). :param cycles <int>: Number of gradient descent cycles. :param lr <float>: Learning rate. Re-calibrated to order of values in matrix M. :param batch_frac <float>: Fraction of the dataset to set as the batch size. :param regularize <float>: Weight decay lambda for regularization in AdamW. :param patience <int>: Number of cycles of convergence before termination. :param verbose <bool>: Output training progress information. 
""" if any([len(X.shape) != 2, len(Y.shape) != 2, mask is not None and mask.shape != X.shape, X.shape[1] != self.input_dim, Y.shape[1] != 1, cycles <= 0, lr <= 0, batch_frac <= 0, regularize < 0]): None return N = X.shape[0] if not torch.is_tensor(X): X = torch.Tensor(X) if not torch.is_tensor(Y): Y = torch.Tensor(Y) mask_tensor = torch.ones(X.shape) if mask is not None: mask_tensor = torch.where(torch.Tensor(mask) != 0, 1, 0) optimizer = AdamW(self.parameters(), lr=lr, weight_decay=regularize) model_opt = dict(self.state_dict()) loss_opt = float('inf') timer = 0 for i in range(cycles): for _ in range(math.ceil(1 / batch_frac)): rand_idx = torch.randint(N, size=(math.ceil(batch_frac * N) ,), generator=self.torch_gen) X_batch = X[rand_idx] Y_batch = Y[rand_idx] mask_batch = mask_tensor[rand_idx] self.zero_grad() Y_mu, Y_sigma = self(X_batch * mask_batch) loss = GaussianNLLLoss()(Y_mu, Y_batch, Y_sigma) loss.sum().backward() optimizer.step() if i % math.ceil(cycles / 5) == 0 and verbose: None if loss.sum().item() < loss_opt: model_opt = dict(self.state_dict()) loss_opt = loss.sum().item() timer = 0 else: timer += 1 if timer > patience: self.load_state_dict(model_opt) break def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'data_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math import numpy as np from torch.nn import * from torch.optim import AdamW from typing import Union assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 100 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 25 x2 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp4 * tmp4 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_abs_sgn_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 32 * x1), xmask) tmp1 = tl_math.abs(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = tmp2 < tmp0 tmp4 = tmp3.to(tl.int8) tmp5 = tmp0 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp4 - tmp6 tmp8 = tmp7.to(tmp0.dtype) tl.store(out_ptr0 + x2, tmp1, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 25), (25, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (2, 25), (25, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 25), (25, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_view_0[grid(256)](primals_2, buf1, 256, XBLOCK =128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32) triton_poi_fused_pow_1[grid(100)](primals_1, buf2, 100, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 25), (25, 1), torch.float32) extern_kernels.mm(buf1, buf2, out=buf3) del buf2 buf4 = empty_strided_cuda((64, 1), (1, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf4) del primals_3 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 25), (400, 100, 25, 1), 0) del buf3 triton_poi_fused_add_mul_pow_sub_2[grid(1600)](buf5, primals_4, buf4, buf0, 1600, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_4 buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (64, 25), (25, 1), 0), reinterpret_tensor(primals_5, (25, 2), (1, 25), 0), alpha=1, beta=1, out=buf6) del primals_6 buf7 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_abs_sgn_3[grid(32)](buf6, buf7, buf8, 32, XBLOCK= 32, num_warps=1, num_stages=1) return reinterpret_tensor(buf6, (4, 4, 2), (32, 2, 1), 0 ), buf7, primals_1, buf0, reinterpret_tensor(buf5, (64, 25), (25, 1), 0 ), buf8, primals_5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0) class FactorizationMachineNew(Module): """ [Factorization Machine Recommendation Model] Learns latent space features to characterize similarity of dataset features to compute a recommendation as a function of dataset features. Dataset features can be mixed / hybrid such that you can combine information on both the recommended object and the recommendation target to generate an informed similarity or recommendation / ranking metric. """ def __init__(self, data_dim, hidden_dim=25, seed=None) ->None: """ Instantiate class attributes for FM. Constructs a feature similarity matrix F of shape (x_features, hidden_dim) to learn implicit representations of all trainable features in the data for recommendation or ranking. :param data_dim <int>: Number of features to learn from in the dataset. :param hidden_dim <int>: Dimension of the latent space of features. :param seed <int>: Random seed fixture for reproducibility. """ super().__init__() self.input_dim = data_dim self.hidden_dim = hidden_dim self.torch_gen = None if seed is not None: self.torch_gen = torch.manual_seed(seed) """ Matrix Factorization """ self.F = Parameter(torch.empty((self.input_dim, self.hidden_dim)), requires_grad=True) init.xavier_uniform_(self.F) """ Linear Regression """ self.V = Parameter(torch.empty((self.input_dim, 1)), requires_grad=True ) init.xavier_uniform_(self.V) self.bias = Parameter(torch.zeros(1), requires_grad=True) """ Gaussian Regression """ self.gaussian_dist = Linear(in_features=self.hidden_dim, out_features=2 ) def fit(self, X: 'Union[torch.Tensor, np.ndarray]', Y: 'Union[torch.Tensor, np.ndarray]', mask: 'Union[torch.Tensor, np.ndarray]'=None, cycles=100, lr=0.002, batch_frac=0.01, regularize=0.01, patience=3, verbose=False): """ Train the Factorization Machine. :param X <torch.Tensor>: Input training data features of shape (N, X). :param Y <torch.Tensor>: Target training data class / score vector of shape (N, 1). :param mask <torch.Tensor>: Feature observability mask for X of shape (N, X). :param cycles <int>: Number of gradient descent cycles. :param lr <float>: Learning rate. Re-calibrated to order of values in matrix M. :param batch_frac <float>: Fraction of the dataset to set as the batch size. :param regularize <float>: Weight decay lambda for regularization in AdamW. :param patience <int>: Number of cycles of convergence before termination. :param verbose <bool>: Output training progress information. 
""" if any([len(X.shape) != 2, len(Y.shape) != 2, mask is not None and mask.shape != X.shape, X.shape[1] != self.input_dim, Y.shape[1] != 1, cycles <= 0, lr <= 0, batch_frac <= 0, regularize < 0]): None return N = X.shape[0] if not torch.is_tensor(X): X = torch.Tensor(X) if not torch.is_tensor(Y): Y = torch.Tensor(Y) mask_tensor = torch.ones(X.shape) if mask is not None: mask_tensor = torch.where(torch.Tensor(mask) != 0, 1, 0) optimizer = AdamW(self.parameters(), lr=lr, weight_decay=regularize) model_opt = dict(self.state_dict()) loss_opt = float('inf') timer = 0 for i in range(cycles): for _ in range(math.ceil(1 / batch_frac)): rand_idx = torch.randint(N, size=(math.ceil(batch_frac * N) ,), generator=self.torch_gen) X_batch = X[rand_idx] Y_batch = Y[rand_idx] mask_batch = mask_tensor[rand_idx] self.zero_grad() Y_mu, Y_sigma = self(X_batch * mask_batch) loss = GaussianNLLLoss()(Y_mu, Y_batch, Y_sigma) loss.sum().backward() optimizer.step() if i % math.ceil(cycles / 5) == 0 and verbose: None if loss.sum().item() < loss_opt: model_opt = dict(self.state_dict()) loss_opt = loss.sum().item() timer = 0 else: timer += 1 if timer > patience: self.load_state_dict(model_opt) break def forward(self, input_0): primals_1 = self.F primals_3 = self.V primals_4 = self.bias primals_5 = self.gaussian_dist.weight primals_6 = self.gaussian_dist.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
cspades/algorithm-toolkit
FactorizationMachine
false
12257
[ "Apache-2.0" ]
0
8731112162fb60f8ef3ab3c38524456ae96f0c2d
https://github.com/cspades/algorithm-toolkit/tree/8731112162fb60f8ef3ab3c38524456ae96f0c2d
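The sq_sm/sm_sq terms in the forward above come from the factorization-machine trick for pairwise interactions; the standalone check below (illustration only, with hypothetical names like F_mat, not part of this record) verifies the underlying identity numerically. Note, as a reading of the code rather than a fix, that the record's forward keeps the per-factor vector for the downstream Linear(hidden_dim, 2) and, by operator precedence, applies the 0.5 only to sq_sm (0.5 * sq_sm - sm_sq), not to the textbook difference 0.5 * (sq_sm - sm_sq).

import torch

torch.manual_seed(0)
n_features, hidden_dim = 4, 25
x = torch.rand(n_features)
F_mat = torch.rand(n_features, hidden_dim)  # plays the role of self.F

# Brute-force pairwise interactions: sum over i < j of <F_i, F_j> * x_i * x_j.
brute = sum(
    torch.dot(F_mat[i], F_mat[j]) * x[i] * x[j]
    for i in range(n_features)
    for j in range(i + 1, n_features)
)

# FM identity: the same quantity equals 0.5 * sum_f [ (x @ F)_f ** 2 - (x**2 @ F**2)_f ].
sq_sm = (x @ F_mat) ** 2           # squared sum, same role as sq_sm in the forward above
sm_sq = (x ** 2) @ (F_mat ** 2)    # sum of squares, same role as sm_sq
vectorised = 0.5 * (sq_sm - sm_sq).sum()

assert torch.allclose(brute, vectorised, atol=1e-5)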
C2
import torch import torch.nn as nn from collections import OrderedDict class C2(nn.Module): def __init__(self) ->None: super(C2, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(16, 32, kernel_size=(3, 3), bias=True)), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c2(img) return output def get_inputs(): return [torch.rand([4, 16, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 32 x0 = xindex % 3844 x4 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x2 = xindex // 961 x5 = xindex x4 = xindex // 30752 x6 = xindex % 30752 tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 30848 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 32, 62, 62), (123904, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(492032)](buf0, primals_2, buf1, 492032, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(123008)](buf1, buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C2New(nn.Module): def __init__(self) ->None: super(C2New, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(16, 32, kernel_size=(3, 3), 
bias=True)), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c2.c2.weight primals_2 = self.c2.c2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
devillove084/DeepSignal
C2
false
12258
[ "MIT" ]
0
1fe122b32752b11e10ca4bef3d07ddd7de4348b5
https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
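A small shape check for the C2 record, assuming only the eager nn.Sequential definition above and the (4, 16, 64, 64) input from get_inputs(); the element counts it prints are the xnumel constants hard-coded in the two Triton kernels of the optimised code (492032 for the fused conv+ReLU, 123008 for the max-pool).

import torch
import torch.nn as nn
from collections import OrderedDict

c2 = nn.Sequential(OrderedDict([
    ('c2', nn.Conv2d(16, 32, kernel_size=(3, 3), bias=True)),  # 64 -> 62 (3x3, no padding)
    ('relu2', nn.ReLU()),
    ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2)),        # 62 -> 31
]))

x = torch.rand(4, 16, 64, 64)            # matches get_inputs()
conv_out = c2.relu2(c2.c2(x))
pool_out = c2.s2(conv_out)

print(conv_out.shape, conv_out.numel())  # (4, 32, 62, 62), 492032 elements
print(pool_out.shape, pool_out.numel())  # (4, 32, 31, 31), 123008 elements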
LinearWithGroupNorm
import torch import torch.utils.data from torch import nn from math import gcd import torch.cuda class LinearWithGroupNorm(nn.Module): def __init__(self, n_in: 'int', n_out: 'int', num_groups: 'int'=32, activation: 'bool'=True) ->None: """ Linear layer used in LaneGCN. :param n_in: Number of input channels. :param n_out: Number of output channels. :param num_groups: Number of groups for GroupNorm. :param activation: Boolean indicating whether to apply ReLU activation. """ super().__init__() self.linear = nn.Linear(n_in, n_out, bias=False) self.norm = nn.GroupNorm(gcd(num_groups, n_out), n_out) self.relu = nn.ReLU(inplace=True) self.activation = activation def forward(self, x: 'torch.Tensor') ->torch.Tensor: """ Apply linear layer to input tensor. :param x: Input tensor. :return: Output of linear layer. """ out = self.linear(x) out = self.norm(out) if self.activation: out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_in': 4, 'n_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn from math import gcd import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = 0.0 tmp31 = tmp29 <= tmp30 tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(16)]( buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 return buf5, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf6 class LinearWithGroupNormNew(nn.Module): def __init__(self, n_in: 'int', n_out: 'int', num_groups: 'int'=32, activation: 'bool'=True) ->None: """ Linear layer used in LaneGCN. 
:param n_in: Number of input channels. :param n_out: Number of output channels. :param num_groups: Number of groups for GroupNorm. :param activation: Boolean indicating whether to apply ReLU activation. """ super().__init__() self.linear = nn.Linear(n_in, n_out, bias=False) self.norm = nn.GroupNorm(gcd(num_groups, n_out), n_out) self.relu = nn.ReLU(inplace=True) self.activation = activation def forward(self, input_0): primals_1 = self.linear.weight primals_3 = self.norm.weight primals_4 = self.norm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
bradyz/nuplan-devkit
LinearWithGroupNorm
false
12259
[ "Apache-2.0" ]
0
0a7a30e5d7fdf3787d9388676b7856fbd7d92992
https://github.com/bradyz/nuplan-devkit/tree/0a7a30e5d7fdf3787d9388676b7856fbd7d92992
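A minimal usage sketch for the LinearWithGroupNorm record, assuming only the eager class definition above and the values from get_init_inputs()/get_inputs(); it mainly shows how gcd(num_groups, n_out) picks the group count when n_out is small (32 and 4 give 4 groups, i.e. one channel per group).

from math import gcd
import torch

# Assumes the LinearWithGroupNorm class defined in this record is available.
layer = LinearWithGroupNorm(n_in=4, n_out=4, num_groups=32)

# gcd(32, 4) == 4, so the GroupNorm is built with 4 groups over 4 channels.
print(gcd(32, 4), layer.norm.num_groups)  # 4 4

x = torch.rand(4, 4, 4, 4)                # matches get_inputs()
out = layer(x)
print(out.shape)                          # torch.Size([4, 4, 4, 4])
print(bool((out >= 0).all()))             # True, because activation=True applies the ReLU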