Dataset schema (column name, dtype, and observed length/value range):

| Column | Type | Range |
| --- | --- | --- |
| entry_point | string | lengths 1–65 |
| original_triton_python_code | string | lengths 208–619k |
| optimised_triton_code | string | lengths 1.15k–275k |
| repo_name | string | lengths 7–115 |
| module_name | string | lengths 1–65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | list | lengths 1–6 |
| stars | int64 | 0–19.8k |
| sha | string | length 40 |
| repo_link | string | lengths 72–180 |
ScoreNetwork
from torch.nn import Module
import torch
from torch.nn import Tanh
from torch.nn import Linear


class ScoreNetwork(Module):
    """
    An optimized single hidden layer neural network for attention scores.
    The optimization idea behind this network is that projection of keys can
    performed only once without concatenation with query. It's allows to avoid
    unnecessary extra computations when attending every time-step over the
    same key-value pairs.
    """

    def __init__(self, query_dim, hidden_dim, non_linearity=Tanh()):
        super(ScoreNetwork, self).__init__()
        self.query_dim = query_dim
        self.hidden_dim = hidden_dim
        self.query_proj = Linear(query_dim, hidden_dim, bias=True)
        self.non_lin = non_linearity
        self.hidden_to_out_proj = Linear(hidden_dim, 1)

    def forward(self, query, key):
        """
        :param query: [batch_size, query_dim]
        :param key: [batch_size, seq_len, hidden_dim]
        :return: out: [batch_size, seq_len, 1]
        """
        assert key.size(2) == self.hidden_dim
        query = self.query_proj(query)
        hidden = self.non_lin(query.unsqueeze(1) + key)
        out = self.hidden_to_out_proj(hidden)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'query_dim': 4, 'hidden_dim': 4}]
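A minimal usage sketch of the scorer above, with illustrative names and shapes that are not part of this record: the keys are projected into `hidden_dim` once per sequence, while the query projection and the tanh scorer run at every decoding step.

```python
# Illustrative only; assumes the ScoreNetwork class from the cell above is in scope.
import torch
from torch.nn import Linear

batch, seq_len, key_dim, query_dim, hidden_dim = 2, 5, 8, 8, 16
key_proj = Linear(key_dim, hidden_dim)   # hypothetical key projection, applied once
scorer = ScoreNetwork(query_dim, hidden_dim)

keys = key_proj(torch.rand(batch, seq_len, key_dim))  # [batch, seq_len, hidden_dim]
for _ in range(3):  # e.g. three decoder time-steps over the same keys
    query = torch.rand(batch, query_dim)
    scores = scorer(query, keys)  # [batch, seq_len, 1]
```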
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn import Tanh from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 256 x4 = xindex % 64 x0 = xindex % 4 x5 = xindex % 256 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + x6, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_3, primals_1, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 buf3 = reinterpret_tensor(buf0, (256, 1), (1, 1), 0) del buf0 extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 return reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf1, primals_5 class ScoreNetworkNew(Module): """ An optimized single hidden layer neural network for attention scores. The optimization idea behind this network is that projection of keys can performed only once without concatenation with query. It's allows to avoid unnecessary extra computations when attending every time-step over the same key-value pairs. """ def __init__(self, query_dim, hidden_dim, non_linearity=Tanh()): super(ScoreNetworkNew, self).__init__() self.query_dim = query_dim self.hidden_dim = hidden_dim self.query_proj = Linear(query_dim, hidden_dim, bias=True) self.non_lin = non_linearity self.hidden_to_out_proj = Linear(hidden_dim, 1) def forward(self, input_0, input_1): primals_2 = self.query_proj.weight primals_3 = self.query_proj.bias primals_5 = self.hidden_to_out_proj.weight primals_6 = self.hidden_to_out_proj.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
stungkit/Copycat-abstractive-opinion-summarizer
ScoreNetwork
false
16,502
[ "MIT" ]
51
04fe5393a7bb6883516766b762f6a0c530e95375
https://github.com/stungkit/Copycat-abstractive-opinion-summarizer/tree/04fe5393a7bb6883516766b762f6a0c530e95375
ContrastiveLoss
import torch
import torch.nn.functional as F


class ContrastiveLoss(torch.nn.Module):
    """
    Contrastive loss function.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2)
        loss_contrastive = torch.mean((1 - label) * torch.pow(
            euclidean_distance, 2) + label * torch.pow(torch.clamp(
            self.margin - euclidean_distance, min=0.0), 2))
        return loss_contrastive


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
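For reference, the quantity computed by `forward` above (and by the fused kernels in the next cell) is the Hadsell-Chopra-LeCun contrastive loss with margin $m = 2$:

$$
d = \lVert \text{output}_1 - \text{output}_2 \rVert_2, \qquad
\mathcal{L} = \operatorname{mean}\big[(1 - y)\,d^{2} + y\,\max(m - d,\ 0)^{2}\big],
$$

where $y = 0$ marks a similar pair and $y = 1$ a dissimilar one. `F.pairwise_distance` adds its default eps of 1e-6 to the difference before taking the norm, which is why the generated kernel adds `1e-06` before squaring.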
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(torch.nn.Module): """ Contrastive loss function. 
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
sugi-chan/project_pendragon
ContrastiveLoss
false
16,503
[ "MIT" ]
56
267624365f25964fece1952e6dcde629bbc2ee5b
https://github.com/sugi-chan/project_pendragon/tree/267624365f25964fece1952e6dcde629bbc2ee5b
Highway
import torch
import torch.nn as nn
import torch.nn.utils


class Highway(nn.Module):

    def __init__(self, eword_size):
        super(Highway, self).__init__()
        self.eword_size = eword_size
        self.w_proj = nn.Linear(self.eword_size, self.eword_size, bias=True)
        self.w_gate = nn.Linear(self.eword_size, self.eword_size, bias=True)
        self.highway_ReLU = nn.ReLU()

    def forward(self, x_conv: 'torch.Tensor'):
        x_proj_pre = self.w_proj(x_conv)
        x_proj = self.highway_ReLU(x_proj_pre)
        x_gate_pre = self.w_gate(x_proj)
        x_gate = torch.sigmoid(x_gate_pre)
        x_highway = x_gate * x_proj + (1 - x_gate) * x_conv
        return x_highway


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'eword_size': 4}]
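The forward pass above amounts to the gating below; note that, as written, the gate is computed from the projected activation $h$ rather than from the raw input $x$:

$$
h = \operatorname{ReLU}(W_{\text{proj}}\,x + b_{\text{proj}}), \qquad
g = \sigma(W_{\text{gate}}\,h + b_{\text{gate}}), \qquad
y = g \odot h + (1 - g) \odot x.
$$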
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf2, buf1, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_3, buf1, buf2, primals_4 class HighwayNew(nn.Module): def __init__(self, eword_size): super(HighwayNew, self).__init__() self.eword_size = eword_size self.w_proj = nn.Linear(self.eword_size, self.eword_size, bias=True) self.w_gate = nn.Linear(self.eword_size, self.eword_size, bias=True) self.highway_ReLU = nn.ReLU() def forward(self, input_0): primals_1 = self.w_proj.weight primals_2 = self.w_proj.bias primals_4 = self.w_gate.weight primals_5 = self.w_gate.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
stxxllbu/CS224n-winter-together
Highway
false
16,504
[ "Apache-2.0" ]
468
eae158ed8e88dc7c8638e25bac4c4fc8eeddcc8c
https://github.com/stxxllbu/CS224n-winter-together/tree/eae158ed8e88dc7c8638e25bac4c4fc8eeddcc8c
MyKernelTorch
import torch
import torch.nn as nn


class MyKernelTorch(nn.Module):

    def __init__(self, n_features: 'int'):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        x = nn.ReLU()(self.dense1(x))
        return self.dense2(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (20, 4), (4, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 20), (20, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1, primals_2, buf3, 1280, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 20), (20, 1), 0), reinterpret_tensor(primals_4, (20, 2), (1, 20), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 20), (20, 1), 0), primals_4, buf3 class MyKernelTorchNew(nn.Module): def __init__(self, n_features: 'int'): super().__init__() self.dense1 = nn.Linear(n_features, 20) self.dense2 = nn.Linear(20, 2) def forward(self, input_0): primals_1 = self.dense1.weight primals_2 = self.dense1.bias primals_4 = self.dense2.weight primals_5 = self.dense2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
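A sanity-check sketch, not part of the dataset: one way to verify that the Inductor-generated module matches the eager one is to copy the weights across and compare outputs. This assumes both classes from this record have been executed and that a CUDA device with Triton is available, since the generated `call()` allocates CUDA buffers.

```python
import torch

torch.manual_seed(0)
ref = MyKernelTorch(n_features=4).cuda()      # eager module from the first cell
opt = MyKernelTorchNew(n_features=4).cuda()   # Inductor-generated module from this cell
opt.load_state_dict(ref.state_dict())         # same Linear layers, so the keys line up

x = torch.rand(4, 4, 4, 4, device='cuda')     # shape expected by the generated call()
with torch.no_grad():
    assert torch.allclose(ref(x), opt(x), atol=1e-5)
```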
sugatoray/alibi-detect
MyKernelTorch
false
16,505
[ "Apache-2.0" ]
1,227
66d7873c248c0be1a1d836e6fe1ef59351b802d9
https://github.com/sugatoray/alibi-detect/tree/66d7873c248c0be1a1d836e6fe1ef59351b802d9
S_Loss
import torch
import torch.nn.functional as F
from torch import nn


class S_Loss(nn.Module):

    def __init__(self):
        super(S_Loss, self).__init__()

    def forward(self, x, label):
        loss = F.smooth_l1_loss(x, label)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
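`F.smooth_l1_loss` with its default `beta=1` (which is also what the fused kernel in the next cell evaluates element-wise before the mean reduction) is the piecewise Huber-style loss:

$$
\ell(a) = \begin{cases} 0.5\,a^{2} & \text{if } |a| < 1 \\ |a| - 0.5 & \text{otherwise} \end{cases},
\qquad
\mathcal{L} = \frac{1}{N} \sum_{i} \ell(x_i - \text{label}_i).
$$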
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class S_LossNew(nn.Module): def __init__(self): super(S_LossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
suyukun666/UFO
S_Loss
false
16,506
[ "MIT" ]
122
e57016948b03cd2f75155d2958cea69b6e4b56f8
https://github.com/suyukun666/UFO/tree/e57016948b03cd2f75155d2958cea69b6e4b56f8
PtModel
import torch
import torch.nn as nn


class PtModel(nn.Module):

    def __init__(self, n_features, n_labels, softmax=False, dropout=False):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, n_labels)
        self.dropout = nn.Dropout(0.5) if dropout else lambda x: x
        self.softmax = nn.Softmax() if softmax else lambda x: x

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        x = nn.ReLU()(self.dense1(x))
        x = self.dropout(x)
        x = self.dense2(x)
        x = self.softmax(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_features': 4, 'n_labels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (20, 4), (4, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 20), (20, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1, primals_2, buf3, 1280, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 20), (20, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 20), (20, 1), 0), primals_4, buf3 class PtModelNew(nn.Module): def __init__(self, n_features, n_labels, softmax=False, dropout=False): super().__init__() self.dense1 = nn.Linear(n_features, 20) self.dense2 = nn.Linear(20, n_labels) self.dropout = nn.Dropout(0.5) if dropout else lambda x: x self.softmax = nn.Softmax() if softmax else lambda x: x def forward(self, input_0): primals_1 = self.dense1.weight primals_2 = self.dense1.bias primals_4 = self.dense2.weight primals_5 = self.dense2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
sugatoray/alibi-detect
PtModel
false
16,507
[ "Apache-2.0" ]
1,227
66d7873c248c0be1a1d836e6fe1ef59351b802d9
https://github.com/sugatoray/alibi-detect/tree/66d7873c248c0be1a1d836e6fe1ef59351b802d9
MLP
import torch
import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(in_features=28 * 28, out_features=500)
        self.fc2 = nn.Linear(in_features=500, out_features=200)
        self.fc3 = nn.Linear(in_features=200, out_features=100)
        self.out = nn.Linear(in_features=100, out_features=10)

    def forward(self, t):
        t = t.view(-1, 28 * 28)
        t = self.fc1(t)
        t = F.relu(t)
        t = self.fc2(t)
        t = F.relu(t)
        t = self.fc3(t)
        t = F.relu(t)
        t = self.out(t)
        return t


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (500, 784), (784, 1)) assert_size_stride(primals_3, (500,), (1,)) assert_size_stride(primals_4, (200, 500), (500, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (100, 200), (200, 1)) assert_size_stride(primals_7, (100,), (1,)) assert_size_stride(primals_8, (10, 100), (100, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 500), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(2000)](buf1, primals_3, 2000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 200), (200, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 200), ( 1, 500), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(800)](buf3, primals_5, 800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (200, 100), ( 1, 200), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(400)](buf5, primals_7, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, 
buf5, reinterpret_tensor(primals_8, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_9 return buf6, primals_1, buf1, buf3, buf5, primals_8, primals_6, primals_4 class MLPNew(nn.Module): def __init__(self): super(MLPNew, self).__init__() self.fc1 = nn.Linear(in_features=28 * 28, out_features=500) self.fc2 = nn.Linear(in_features=500, out_features=200) self.fc3 = nn.Linear(in_features=200, out_features=100) self.out = nn.Linear(in_features=100, out_features=10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.out.weight primals_9 = self.out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
stjordanis/ml-cheatsheet
MLP
false
16,508
[ "MIT" ]
1,031
d34e096032b7ae826868be8808aee01699cec491
https://github.com/stjordanis/ml-cheatsheet/tree/d34e096032b7ae826868be8808aee01699cec491
ToRGB
from torch.autograd import Function import math import torch import torch.nn as nn import torch.nn.functional as F def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): if input.device.type == 'cpu': rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2) * scale else: return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def get_haar_wavelet(in_channels): haar_wav_l = 1 / 2 ** 0.5 * torch.ones(1, 2) haar_wav_h = 1 / 2 ** 0.5 * torch.ones(1, 2) haar_wav_h[0, 0] = -1 * haar_wav_h[0, 0] haar_wav_ll = haar_wav_l.T * haar_wav_l haar_wav_lh = haar_wav_h.T * haar_wav_l haar_wav_hl = haar_wav_l.T * haar_wav_h haar_wav_hh = haar_wav_h.T * haar_wav_h return haar_wav_ll, haar_wav_lh, haar_wav_hl, haar_wav_hh class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. 
in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, 
None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class HaarTransform(nn.Module): def __init__(self, in_channels): super().__init__() ll, lh, hl, hh = get_haar_wavelet(in_channels) self.register_buffer('ll', ll) self.register_buffer('lh', lh) self.register_buffer('hl', hl) self.register_buffer('hh', hh) def forward(self, input): ll = upfirdn2d(input, self.ll, down=2) lh = upfirdn2d(input, self.lh, down=2) hl = upfirdn2d(input, self.hl, down=2) hh = upfirdn2d(input, self.hh, down=2) return torch.cat((ll, lh, hl, hh), 1) class InverseHaarTransform(nn.Module): def __init__(self, in_channels): super().__init__() ll, lh, hl, hh = get_haar_wavelet(in_channels) self.register_buffer('ll', ll) self.register_buffer('lh', -lh) self.register_buffer('hl', -hl) self.register_buffer('hh', hh) def forward(self, input): ll, lh, hl, hh = input.chunk(4, 1) ll = upfirdn2d(ll, self.ll, up=2, pad=(1, 0, 1, 0)) lh = upfirdn2d(lh, self.lh, up=2, pad=(1, 0, 1, 0)) hl = upfirdn2d(hl, self.hl, up=2, pad=(1, 0, 1, 0)) hh = upfirdn2d(hh, self.hh, up=2, pad=(1, 0, 1, 0)) return ll + lh + hl + hh class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.iwt = InverseHaarTransform(3) self.upsample = Upsample(blur_kernel) self.dwt = HaarTransform(3) self.conv = ModulatedConv2d(in_channel, 3 * 4, 1, style_dim, demodulate=False) self.bias = nn.Parameter(torch.zeros(1, 3 * 4, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.iwt(skip) skip = self.upsample(skip) skip = self.dwt(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
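For reference, `ModulatedConv2d` above scales the shared weight per sample by the style produced by the `EqualLinear` modulation and, when `demodulate=True`, renormalizes each output filter:

$$
\hat w_{b,o,i,k,l} = \frac{w_{o,i,k,l}\, s_{b,i}}{\sqrt{\text{fan\_in}}}, \qquad
w''_{b,o,i,k,l} = \hat w_{b,o,i,k,l} \Big( \sum_{i,k,l} \hat w_{b,o,i,k,l}^{\,2} + 10^{-8} \Big)^{-1/2},
$$

with $\text{fan\_in} = \text{in\_channel} \cdot \text{kernel\_size}^2$. `ToRGB` constructs its conv with `demodulate=False`, so only the first (modulation) step applies there, which is what the `triton_poi_fused_mul_2` kernel in the next cell computes.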
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 48 x0 = xindex % 4 x2 = xindex // 48 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 12, 4, 1, 1), (48, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 12, 1, 1), (12, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 12, 4, 1, 1), (48, 4, 1, 1, 1), torch .float32) triton_poi_fused_mul_2[grid(192)](primals_5, buf2, buf3, 192, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (48, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) 
assert_size_stride(buf4, (1, 48, 4, 4), (768, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 12, 4, 4), (192, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(768)](buf5, primals_6, 768, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (48, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): if input.device.type == 'cpu': rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2) * scale else: return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def get_haar_wavelet(in_channels): haar_wav_l = 1 / 2 ** 0.5 * torch.ones(1, 2) haar_wav_h = 1 / 2 ** 0.5 * torch.ones(1, 2) haar_wav_h[0, 0] = -1 * haar_wav_h[0, 0] haar_wav_ll = haar_wav_l.T * haar_wav_l haar_wav_lh = haar_wav_h.T * haar_wav_l haar_wav_hl = haar_wav_l.T * haar_wav_h haar_wav_hh = haar_wav_h.T * haar_wav_h return haar_wav_ll, haar_wav_lh, haar_wav_hl, haar_wav_hh class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = 
pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = 
negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class HaarTransform(nn.Module): def __init__(self, in_channels): super().__init__() ll, lh, hl, hh = get_haar_wavelet(in_channels) self.register_buffer('ll', ll) self.register_buffer('lh', lh) self.register_buffer('hl', hl) self.register_buffer('hh', hh) def forward(self, input): ll = upfirdn2d(input, self.ll, down=2) lh = upfirdn2d(input, self.lh, down=2) hl = upfirdn2d(input, self.hl, down=2) hh = upfirdn2d(input, self.hh, down=2) return torch.cat((ll, lh, hl, hh), 1) class InverseHaarTransform(nn.Module): def __init__(self, in_channels): super().__init__() ll, lh, hl, hh = get_haar_wavelet(in_channels) self.register_buffer('ll', ll) self.register_buffer('lh', -lh) self.register_buffer('hl', -hl) self.register_buffer('hh', hh) def forward(self, input): ll, lh, hl, hh = input.chunk(4, 1) ll = upfirdn2d(ll, self.ll, up=2, pad=(1, 0, 1, 0)) lh = upfirdn2d(lh, self.lh, up=2, pad=(1, 0, 1, 0)) hl = upfirdn2d(hl, self.hl, up=2, pad=(1, 0, 1, 0)) hh = upfirdn2d(hh, self.hh, up=2, pad=(1, 0, 1, 0)) return ll + lh + hl + hh class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.iwt = InverseHaarTransform(3) self.upsample = Upsample(blur_kernel) self.dwt = HaarTransform(3) self.conv = ModulatedConv2d(in_channel, 3 * 4, 1, style_dim, demodulate=False) self.bias = nn.Parameter(torch.zeros(1, 3 * 4, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
songquanpeng/BlendGAN
ToRGB
false
16509
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
67
cbf7225c50c548ee955614715ae3f8fa4d68ee13
https://github.com/songquanpeng/BlendGAN/tree/cbf7225c50c548ee955614715ae3f8fa4d68ee13
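The ToRGB record above is built around ModulatedConv2d, whose forward() scales the shared weight by a per-sample style vector, optionally demodulates it, and then folds the batch into the group dimension of a single grouped convolution. Below is a minimal eager-mode sketch of just that weight path, assuming demodulate=True and the non-upsampling branch; the shapes are illustrative and not taken from the record.

import math
import torch
import torch.nn.functional as F

batch, in_channel, out_channel, kernel_size, size = 2, 8, 4, 1, 16

weight = torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
style = torch.randn(batch, 1, in_channel, 1, 1)        # stand-in for self.modulation(style).view(batch, 1, in_channel, 1, 1)
scale = 1 / math.sqrt(in_channel * kernel_size ** 2)   # self.scale

w = scale * weight * style                             # modulate: one weight tensor per sample
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)   # demodulate (the demodulate=True path)
w = w * demod.view(batch, out_channel, 1, 1, 1)
w = w.view(batch * out_channel, in_channel, kernel_size, kernel_size)

x = torch.randn(batch, in_channel, size, size)
x = x.view(1, batch * in_channel, size, size)          # fold batch into channels
out = F.conv2d(x, w, padding=kernel_size // 2, groups=batch)
out = out.view(batch, out_channel, size, size)
print(out.shape)                                       # torch.Size([2, 4, 16, 16])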
SoftCrossEntropyLoss2d
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils class SoftCrossEntropyLoss2d(nn.Module): def __init__(self): super(SoftCrossEntropyLoss2d, self).__init__() def forward(self, inputs, targets): loss = 0 inputs = -F.log_softmax(inputs, dim=1) for index in range(inputs.size()[0]): loss += F.conv2d(inputs[range(index, index + 1)], targets[range (index, index + 1)]) / (targets.size()[2] * targets.size()[3]) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_neg_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp14 = -tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused_index_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_index_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (64 + x1 + 16 * y0), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_index_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (128 + x1 + 16 * y0), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_index_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (192 + x1 + 16 * y0), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_div_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp10 = tl.load(in_out_ptr0 + 0) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr2 + 0) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp2 = 0.0625 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = tmp3 + tmp4 tmp8 = tmp7 * tmp2 tmp9 = tmp5 + tmp8 tmp12 = tmp11 * tmp2 tmp13 = tmp9 + tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_neg_1[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_index_2[grid(4, 16)](buf1, buf2, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_index_2[grid(4, 16)](arg1_1, buf3, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(buf2, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (1, 1, 1, 1), (1, 1, 1, 1)) buf5 = buf3 del buf3 triton_poi_fused_index_3[grid(4, 16)](buf1, buf5, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf6 = buf2 del buf2 triton_poi_fused_index_3[grid(4, 16)](arg1_1, buf6, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf7 = extern_kernels.convolution(buf5, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 1, 1, 1), (1, 1, 1, 1)) buf8 = buf6 del buf6 triton_poi_fused_index_4[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = buf5 del buf5 triton_poi_fused_index_4[grid(4, 
16)](arg1_1, buf9, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf10 = extern_kernels.convolution(buf8, buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (1, 1, 1, 1), (1, 1, 1, 1)) buf11 = buf9 del buf9 triton_poi_fused_index_5[grid(4, 16)](buf1, buf11, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del buf1 buf12 = buf8 del buf8 triton_poi_fused_index_5[grid(4, 16)](arg1_1, buf12, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) del arg1_1 buf13 = extern_kernels.convolution(buf11, buf12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (1, 1, 1, 1), (1, 1, 1, 1)) del buf11 del buf12 buf14 = buf10 del buf10 triton_poi_fused_add_div_6[grid(1)](buf14, buf4, buf7, buf13, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf13 del buf4 del buf7 return buf14, class SoftCrossEntropyLoss2dNew(nn.Module): def __init__(self): super(SoftCrossEntropyLoss2dNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
songzijiang/FasterSeg
SoftCrossEntropyLoss2d
false
16510
[ "MIT" ]
334
1a14ef6dd665afd229a16ab43b532b5a406512f8
https://github.com/songzijiang/FasterSeg/tree/1a14ef6dd665afd229a16ab43b532b5a406512f8
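In the record above, each iteration of the loop in SoftCrossEntropyLoss2d.forward convolves one sample of -log_softmax(inputs) with that sample's soft target map as a full-size kernel, so the convolution collapses to a single dot product over channels and positions. A short check of that equivalence, using the same 4x4x4x4 shapes as the record's get_inputs() (the loop mirrors the forward pass; the vectorised line exists only for comparison):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
inputs = torch.rand(4, 4, 4, 4)
targets = torch.rand(4, 4, 4, 4)

logp = -F.log_softmax(inputs, dim=1)

loss_conv = 0
for i in range(inputs.size(0)):
    # full-size kernel: the conv output is a single (1, 1, 1, 1) value per sample
    loss_conv = loss_conv + F.conv2d(logp[i:i + 1], targets[i:i + 1]) / (
        targets.size(2) * targets.size(3))

loss_dot = (logp * targets).sum(dim=(1, 2, 3)) / (targets.size(2) * targets.size(3))
print(torch.allclose(loss_conv.squeeze(), loss_dot.sum()))   # True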
BinaryTreeLeafModule
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class BinaryTreeLeafModule(nn.Module): """ local input = nn.Identity()() local c = nn.Linear(self.in_dim, self.mem_dim)(input) local h if self.gate_output then local o = nn.Sigmoid()(nn.Linear(self.in_dim, self.mem_dim)(input)) h = nn.CMulTable(){o, nn.Tanh()(c)} else h = nn.Tanh()(c) end local leaf_module = nn.gModule({input}, {c, h}) """ def __init__(self, cuda, in_dim, mem_dim): super(BinaryTreeLeafModule, self).__init__() self.cudaFlag = cuda self.in_dim = in_dim self.mem_dim = mem_dim self.cx = nn.Linear(self.in_dim, self.mem_dim) self.ox = nn.Linear(self.in_dim, self.mem_dim) if self.cudaFlag: self.cx = self.cx self.ox = self.ox def forward(self, input): c = self.cx(input) o = F.sigmoid(self.ox(input)) h = o * F.tanh(c) return c, h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cuda': False, 'in_dim': 4, 'mem_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(256)](buf1, buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf1 class BinaryTreeLeafModuleNew(nn.Module): """ local input = nn.Identity()() local c = nn.Linear(self.in_dim, self.mem_dim)(input) local h if self.gate_output then local o = nn.Sigmoid()(nn.Linear(self.in_dim, self.mem_dim)(input)) h = nn.CMulTable(){o, nn.Tanh()(c)} else h = nn.Tanh()(c) end local leaf_module = nn.gModule({input}, {c, h}) """ def __init__(self, cuda, in_dim, mem_dim): super(BinaryTreeLeafModuleNew, self).__init__() self.cudaFlag = cuda self.in_dim = in_dim self.mem_dim = mem_dim self.cx = nn.Linear(self.in_dim, self.mem_dim) self.ox = nn.Linear(self.in_dim, self.mem_dim) if self.cudaFlag: self.cx = self.cx self.ox = self.ox def forward(self, input_0): primals_1 = self.cx.weight primals_2 = self.cx.bias primals_4 = self.ox.weight primals_5 = self.ox.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
supunab/Lantern
BinaryTreeLeafModule
false
16511
[ "BSD-3-Clause" ]
158
932a031816617d71c46653f3b2245129a6a8a7c8
https://github.com/supunab/Lantern/tree/932a031816617d71c46653f3b2245129a6a8a7c8
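A generic way to sanity-check any optimised_triton_code entry in this dump against its eager original is to run both on the same CUDA tensors with shared weights and compare outputs. The sketch below does that for the BinaryTreeLeafModule record, assuming its two code fields have been saved locally as leaf_eager.py and leaf_triton.py (hypothetical file names) and that a CUDA device is available, since the generated kernels only run on GPU.

import torch

from leaf_eager import BinaryTreeLeafModule        # original_triton_python_code field (hypothetical module name)
from leaf_triton import BinaryTreeLeafModuleNew    # optimised_triton_code field (hypothetical module name)

torch.manual_seed(0)
eager = BinaryTreeLeafModule(cuda=False, in_dim=4, mem_dim=4).cuda()
fused = BinaryTreeLeafModuleNew(cuda=False, in_dim=4, mem_dim=4).cuda()
fused.load_state_dict(eager.state_dict())          # both expose the same cx/ox Linear layers

x = torch.rand(4, 4, 4, 4, device='cuda')          # matches the record's get_inputs()
c_ref, h_ref = eager(x)
c_new, h_new = fused(x)
print(torch.allclose(c_ref, c_new, atol=1e-05), torch.allclose(h_ref, h_new, atol=1e-05))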
VAE
import torch import numpy as np from abc import ABC from abc import abstractmethod import torch.nn.functional as F from torch.functional import F from torch import nn from typing import * from torch.nn import functional as F def to_array_as(x, y): if isinstance(x, torch.Tensor) and isinstance(y, np.ndarray): return x.detach().cpu().numpy().astype(y.dtype) elif isinstance(x, np.ndarray) and isinstance(y, torch.Tensor): return torch.tensor(x) else: return x class BasePolicy(ABC): @abstractmethod def policy_infer(self, obs): pass def get_action(self, obs): obs_tensor = torch.tensor(obs, device=next(self.parameters()). device, dtype=torch.float32) act = to_array_as(self.policy_infer(obs_tensor), obs) return act class VAE(nn.Module, BasePolicy): def __init__(self, state_dim, action_dim, latent_dim, max_action, hidden_size=750): super(VAE, self).__init__() self.e1 = nn.Linear(state_dim + action_dim, hidden_size) self.e2 = nn.Linear(hidden_size, hidden_size) self.mean = nn.Linear(hidden_size, latent_dim) self.log_std = nn.Linear(hidden_size, latent_dim) self.d1 = nn.Linear(state_dim + latent_dim, hidden_size) self.d2 = nn.Linear(hidden_size, hidden_size) self.d3 = nn.Linear(hidden_size, action_dim) self.max_action = max_action self.latent_dim = latent_dim self._actor = None def forward(self, state, action): z = F.relu(self.e1(torch.cat([state, action], 1))) z = F.relu(self.e2(z)) mean = self.mean(z) log_std = self.log_std(z).clamp(-4, 15) std = torch.exp(log_std) z = mean + std * torch.randn_like(std) u = self.decode(state, z) return u, mean, std def decode(self, state, z=None, clip=None, raw=False): if z is None: z = torch.randn((state.shape[0], self.latent_dim)) if clip is not None: z = z.clamp(-clip, clip) a = F.relu(self.d1(torch.cat([state, z], 1))) a = F.relu(self.d2(a)) a = self.d3(a) if raw: return a return self.max_action * torch.tanh(a) def policy_infer(self, obs): return self.decode(obs, z=self._actor(obs)[0]) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4, 'latent_dim': 4, 'max_action': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from abc import ABC from abc import abstractmethod import torch.nn.functional as F from torch.functional import F from torch import nn from typing import * from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 3000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 750 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clamp_exp_ge_le_logical_and_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -4.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 >= tmp3 tmp9 = tmp2 <= tmp5 tmp10 = tmp8 & tmp9 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr3 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) 
tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (750, 8), (8, 1)) assert_size_stride(primals_4, (750,), (1,)) assert_size_stride(primals_5, (750, 750), (750, 1)) assert_size_stride(primals_6, (750,), (1,)) assert_size_stride(primals_7, (4, 750), (750, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 750), (750, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (750, 8), (8, 1)) assert_size_stride(primals_12, (750,), (1,)) assert_size_stride(primals_13, (750, 750), (750, 1)) assert_size_stride(primals_14, (750,), (1,)) assert_size_stride(primals_15, (4, 750), (750, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 750), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(3000)](buf2, primals_4, 3000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (750, 750), ( 1, 750), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(3000)](buf4, primals_6, 3000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (750, 4), (1, 750), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_9, (750, 4), (1, 750), 0), out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_exp_ge_le_logical_and_2[grid(16)](buf6, primals_10, buf7, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf8 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_3[grid(32)](primals_1, buf5, buf7, buf9, buf10, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf11 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (8, 750), ( 1, 8), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_1[grid(3000)](buf12, primals_12, 3000, XBLOCK =256, num_warps=4, num_stages=1) del primals_12 buf13 = empty_strided_cuda((4, 750), (750, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_13, 
(750, 750), (1, 750), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_1[grid(3000)](buf14, primals_14, 3000, XBLOCK =256, num_warps=4, num_stages=1) del primals_14 buf15 = buf6 del buf6 extern_kernels.addmm(primals_16, buf14, reinterpret_tensor( primals_15, (750, 4), (1, 750), 0), alpha=1, beta=1, out=buf15) del primals_16 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_tanh_4[grid(16)](buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) return (buf16, buf5, buf7, buf0, buf2, buf4, buf7, buf9, buf10, buf12, buf14, buf15, primals_15, primals_13, primals_11, buf17, primals_9, primals_7, primals_5) def to_array_as(x, y): if isinstance(x, torch.Tensor) and isinstance(y, np.ndarray): return x.detach().cpu().numpy().astype(y.dtype) elif isinstance(x, np.ndarray) and isinstance(y, torch.Tensor): return torch.tensor(x) else: return x class BasePolicy(ABC): @abstractmethod def policy_infer(self, obs): pass def get_action(self, obs): obs_tensor = torch.tensor(obs, device=next(self.parameters()). device, dtype=torch.float32) act = to_array_as(self.policy_infer(obs_tensor), obs) return act class VAENew(nn.Module, BasePolicy): def __init__(self, state_dim, action_dim, latent_dim, max_action, hidden_size=750): super(VAENew, self).__init__() self.e1 = nn.Linear(state_dim + action_dim, hidden_size) self.e2 = nn.Linear(hidden_size, hidden_size) self.mean = nn.Linear(hidden_size, latent_dim) self.log_std = nn.Linear(hidden_size, latent_dim) self.d1 = nn.Linear(state_dim + latent_dim, hidden_size) self.d2 = nn.Linear(hidden_size, hidden_size) self.d3 = nn.Linear(hidden_size, action_dim) self.max_action = max_action self.latent_dim = latent_dim self._actor = None def decode(self, state, z=None, clip=None, raw=False): if z is None: z = torch.randn((state.shape[0], self.latent_dim)) if clip is not None: z = z.clamp(-clip, clip) a = F.relu(self.d1(torch.cat([state, z], 1))) a = F.relu(self.d2(a)) a = self.d3(a) if raw: return a return self.max_action * torch.tanh(a) def policy_infer(self, obs): return self.decode(obs, z=self._actor(obs)[0]) def forward(self, input_0, input_1): primals_3 = self.e1.weight primals_4 = self.e1.bias primals_5 = self.e2.weight primals_6 = self.e2.bias primals_7 = self.mean.weight primals_8 = self.mean.bias primals_9 = self.log_std.weight primals_10 = self.log_std.bias primals_11 = self.d1.weight primals_12 = self.d1.bias primals_13 = self.d2.weight primals_14 = self.d2.bias primals_15 = self.d3.weight primals_16 = self.d3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0], output[1], output[2]
ssimonc/NeoRL
VAE
false
16512
[ "Apache-2.0" ]
50
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
https://github.com/ssimonc/NeoRL/tree/098c58c8e4c3e43e67803f6384619d3bfe7fce5d
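The encoder in the VAE record clamps log_std to [-4, 15] before exponentiating, then samples with the reparameterization trick (mean + std * noise) so the latent stays differentiable with respect to the encoder outputs. A tiny worked example of that step in isolation, using the record's latent_dim=4 and a batch of 4 (the values are illustrative):

import torch

torch.manual_seed(0)
mean = torch.zeros(4, 4)
log_std = torch.full((4, 4), 20.0).clamp(-4, 15)   # anything above 15 is clipped
std = torch.exp(log_std)                           # exp(15) ~ 3.3e6 rather than exp(20) ~ 4.9e8
z = mean + std * torch.randn_like(std)             # differentiable w.r.t. mean and std
print(std[0, 0].item(), z.shape)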
Weighed_Bce_Loss
import torch import torch.nn.functional as F from torch import nn class Weighed_Bce_Loss(nn.Module): def __init__(self): super(Weighed_Bce_Loss, self).__init__() def forward(self, x, label): x = x.view(-1, 1, x.shape[1], x.shape[2]) label = label.view(-1, 1, label.shape[1], label.shape[2]) label_t = (label == 1).float() label_f = (label == 0).float() p = torch.sum(label_t) / (torch.sum(label_t) + torch.sum(label_f)) w = torch.zeros_like(label) w[label == 1] = p w[label == 0] = 1 - p loss = F.binary_cross_entropy(x, label, weight=w) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_add_binary_cross_entropy_div_eq_index_put_rsub_sum_zeros_like_0( in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp19 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 == tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.0 tmp8 = tmp0 == tmp7 tmp9 = tmp8.to(tl.float32) tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = tmp6 + tmp12 tmp14 = tmp6 / tmp13 tmp15 = tl.where(tmp2, tmp14, tmp7) tmp16 = tmp1 - tmp14 tmp17 = tl.where(tmp8, tmp16, tmp15) tmp18 = tmp0 - tmp1 tmp20 = -tmp19 tmp21 = libdevice.log1p(tmp20) tmp22 = -100.0 tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tmp18 * tmp23 tmp25 = tl_math.log(tmp19) tmp26 = triton_helpers.maximum(tmp25, tmp22) tmp27 = tmp0 * tmp26 tmp28 = tmp24 - tmp27 tmp29 = tmp28 * tmp17 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tmp33 = 256.0 tmp34 = tmp32 / tmp33 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp34, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 get_raw_stream(0) triton_per_fused__to_copy_add_binary_cross_entropy_div_eq_index_put_rsub_sum_zeros_like_0[ grid(1)](buf6, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf6, class Weighed_Bce_LossNew(nn.Module): def __init__(self): super(Weighed_Bce_LossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
suyukun666/UFO
Weighed_Bce_Loss
false
16513
[ "MIT" ]
122
e57016948b03cd2f75155d2958cea69b6e4b56f8
https://github.com/suyukun666/UFO/tree/e57016948b03cd2f75155d2958cea69b6e4b56f8
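Weighed_Bce_Loss builds its per-pixel weight map from exact comparisons (label == 1 and label == 0), so it only does something useful on hard binary masks; with the uniform random floats from the record's get_inputs(), both masks are empty and every weight stays zero. A small sketch of the weight construction with a genuinely binary mask, mirroring forward():

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.rand(2, 1, 4, 4)
label = (torch.rand(2, 1, 4, 4) > 0.75).float()        # sparse binary foreground

label_t = (label == 1).float()
label_f = (label == 0).float()
p = label_t.sum() / (label_t.sum() + label_f.sum())    # fraction of foreground pixels

w = torch.zeros_like(label)
w[label == 1] = p                                      # foreground pixels weighted by p
w[label == 0] = 1 - p                                  # background pixels weighted by 1 - p
loss = F.binary_cross_entropy(x, label, weight=w)
print(round(p.item(), 3), round(loss.item(), 3))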
Conv2dWithConstraint
import torch import torch as th from torch import nn class Conv2dWithConstraint(nn.Conv2d): def __init__(self, *args, max_norm=1, **kwargs): self.max_norm = max_norm super(Conv2dWithConstraint, self).__init__(*args, **kwargs) def forward(self, x): self.weight.data = th.renorm(self.weight.data, p=2, dim=0, maxnorm= self.max_norm) return super(Conv2dWithConstraint, self).forward(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_renorm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl .constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = 1.0 tmp8 = tmp6 > tmp7 tmp9 = 1e-07 tmp10 = tmp6 + tmp9 tmp11 = tl.full([1, 1], 1, tl.int32) tmp12 = tmp11 / tmp10 tmp13 = tmp12 * tmp7 tmp14 = tl.where(tmp8, tmp13, tmp7) tmp15 = tmp0 * tmp14 tl.store(out_ptr1 + (r1 + 64 * x0), tmp15, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_renorm_0[grid(4)](primals_1, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(primals_3, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = torch.ops.aten.set_.source_Tensor(primals_1, buf1) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) del buf2 del primals_1 return buf3, primals_3, buf1 class Conv2dWithConstraintNew(nn.Conv2d): def __init__(self, *args, max_norm=1, **kwargs): self.max_norm = max_norm super(Conv2dWithConstraintNew, self).__init__(*args, **kwargs) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
sylvchev/braindecode
Conv2dWithConstraint
false
16514
[ "BSD-3-Clause" ]
260
c37ace8fcb90eee0d447c97d1c0a06ce58e8f6ad
https://github.com/sylvchev/braindecode/tree/c37ace8fcb90eee0d447c97d1c0a06ce58e8f6ad
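Conv2dWithConstraint re-normalizes its weight in place on every forward call, capping the L2 norm of each output filter (each dim=0 slice) at max_norm before running the parent Conv2d. A quick demonstration of what that torch.renorm call does, using the record's (4, 4, 4, 4) weight shape:

import torch

torch.manual_seed(0)
w = 3 * torch.randn(4, 4, 4, 4)                       # (out_channels, in_channels, kH, kW)
w_renormed = torch.renorm(w, p=2, dim=0, maxnorm=1.0)

norms = w_renormed.view(4, -1).norm(dim=1)            # per-output-filter L2 norms
print(norms)                                          # every entry is <= 1.0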
Unet
import torch from torch import nn import torch.nn.functional as F class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm=None, residual=True, activation='leakyrelu', in_place_activation=True, transpose=False, reflectpad=True): super(ConvBlock, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose self.reflectpad = reflectpad if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm is not None: if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=0 if self.reflectpad else 1) if self.activation == 'relu': self.actfun1 = nn.ReLU(inplace=in_place_activation) self.actfun2 = nn.ReLU(inplace=in_place_activation) elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU(inplace=in_place_activation) self.actfun2 = nn.LeakyReLU(inplace=in_place_activation) elif self.activation == 'elu': self.actfun1 = nn.ELU(inplace=in_place_activation) self.actfun2 = nn.ELU(inplace=in_place_activation) elif self.activation == 'selu': self.actfun1 = nn.SELU(inplace=in_place_activation) self.actfun2 = nn.SELU(inplace=in_place_activation) if self.reflectpad: self.rpad1 = nn.ReflectionPad2d(1) self.rpad2 = nn.ReflectionPad2d(1) def forward(self, x): ox = x if self.reflectpad: x = self.rpad1(x) x = self.conv1(x) if self.dropout: x = self.dropout1(x) x = self.actfun1(x) if self.norm1: x = self.norm1(x) if self.reflectpad: x = self.rpad2(x) x = self.conv2(x) if self.dropout: x = self.dropout2(x) if self.residual: x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox. 
shape[1], x.shape[1]), :, :] x = self.actfun2(x) if self.norm2: x = self.norm2(x) return x class Unet(nn.Module): def __init__(self, n_channel_in=1, n_channel_out=1, n_internal_channels =8, residual=True, down='avgpool', up='bilinear', activation= 'leakyrelu', norm=None, softmax=False): super(Unet, self).__init__() self.residual = residual self.softmax = softmax nic = n_internal_channels if down == 'maxpool': self.down1 = nn.MaxPool2d(kernel_size=2) self.down2 = nn.MaxPool2d(kernel_size=2) self.down3 = nn.MaxPool2d(kernel_size=2) self.down4 = nn.MaxPool2d(kernel_size=2) elif down == 'avgpool': self.down1 = nn.AvgPool2d(kernel_size=2) self.down2 = nn.AvgPool2d(kernel_size=2) self.down3 = nn.AvgPool2d(kernel_size=2) self.down4 = nn.AvgPool2d(kernel_size=2) elif down == 'convpool': self.down1 = nn.Conv2d(nic, nic, kernel_size=2, stride=2, groups=32 ) self.down2 = nn.Conv2d(nic * 2, nic * 2, kernel_size=2, stride= 2, groups=64) self.down3 = nn.Conv2d(nic * 4, nic * 4, kernel_size=2, stride= 2, groups=128) self.down4 = nn.Conv2d(nic * 8, nic * 8, kernel_size=2, stride= 2, groups=256) self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25 self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25 self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25 self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25 self.down1.bias.data = 0.01 * self.down1.bias.data + 0 self.down2.bias.data = 0.01 * self.down2.bias.data + 0 self.down3.bias.data = 0.01 * self.down3.bias.data + 0 self.down4.bias.data = 0.01 * self.down4.bias.data + 0 if up == 'bilinear' or up == 'nearest': self.up1 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) self.up2 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) self.up3 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) self.up4 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) elif up == 'tconv': self.up1 = nn.ConvTranspose2d(nic * 8, nic * 8, kernel_size=2, stride=2, groups=nic * 8) self.up2 = nn.ConvTranspose2d(nic * 4, nic * 4, kernel_size=2, stride=2, groups=nic * 4) self.up3 = nn.ConvTranspose2d(nic * 2, nic * 2, kernel_size=2, stride=2, groups=nic * 2) self.up4 = nn.ConvTranspose2d(nic, nic, kernel_size=2, stride=2, groups=nic) self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25 self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25 self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25 self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25 self.up1.bias.data = 0.01 * self.up1.bias.data + 0 self.up2.bias.data = 0.01 * self.up2.bias.data + 0 self.up3.bias.data = 0.01 * self.up3.bias.data + 0 self.up4.bias.data = 0.01 * self.up4.bias.data + 0 self.conv1 = ConvBlock(n_channel_in, nic, residual=residual, activation=activation, norm=norm) self.conv2 = ConvBlock(nic, nic * 2, residual=residual, activation= activation, norm=norm) self.conv3 = ConvBlock(nic * 2, nic * 4, residual=residual, activation=activation, norm=norm) self.conv4 = ConvBlock(nic * 4, nic * 8, residual=residual, activation=activation, norm=norm) self.conv5 = ConvBlock(nic * 8, nic * 8, residual=residual, activation=activation, norm=norm) self.conv6 = ConvBlock(2 * nic * 8, nic * 4, residual=residual, activation=activation, norm=norm) self.conv7 = ConvBlock(2 * nic * 4, nic * 2, residual=residual, activation=activation, norm=norm) self.conv8 = ConvBlock(2 * nic * 2, nic, residual=residual, activation=activation, norm=norm) 
self.conv9 = ConvBlock(2 * nic, n_channel_out, residual=residual, activation=activation, norm=norm) if self.residual: self.convres = ConvBlock(n_channel_in, n_channel_out, residual= residual, activation=activation, norm=norm) def forward(self, x): c0 = x c1 = self.conv1(x) x = self.down1(c1) c2 = self.conv2(x) x = self.down2(c2) c3 = self.conv3(x) x = self.down3(c3) c4 = self.conv4(x) x = self.down4(c4) if self.softmax: x = F.softmax(x, dim=1) x = self.conv5(x) x = self.up1(x) if self.softmax: x = F.softmax(x, dim=1) x = torch.cat([x, c4], 1) x = self.conv6(x) x = self.up2(x) if self.softmax: x = F.softmax(x, dim=1) x = torch.cat([x, c3], 1) x = self.conv7(x) x = self.up3(x) if self.softmax: x = F.softmax(x, dim=1) x = torch.cat([x, c2], 1) x = self.conv8(x) x = self.up4(x) if self.softmax: x = F.softmax(x, dim=1) x = torch.cat([x, c1], 1) x = self.conv9(x) if self.residual: x = torch.add(x, self.convres(c0)) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 17424 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x2 = xindex // 4356 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 139392 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x4 = xindex // 4356 x2 = xindex // 4356 % 8 x5 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 8 x3 = xindex x0 = xindex % 4096 x2 = xindex // 32768 tmp21 = tl.load(in_out_ptr0 + x3, None) tmp22 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_out_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr0 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp3, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_out_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr0 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp2, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_poi_fused_avg_pool2d_reflection_pad2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 36992 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (4030 + -128 * tl_math.abs(-31 + tl_math.abs(- 1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2 ), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4031 + -128 * tl_math.abs(-31 + tl_math.abs(- 1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2 ), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4094 + -128 * tl_math.abs(-31 + tl_math.abs(- 1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2 ), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4095 + -128 * tl_math.abs(-31 + tl_math.abs(- 1 + x1)) + -2 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + 4096 * x2 ), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 73984 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 1024 % 16 x5 = xindex x0 = xindex % 32 x3 = xindex // 16384 x6 = xindex // 32 % 512 tmp18 = tl.load(in_out_ptr0 + x5, None) tmp19 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 8, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_out_ptr0 + x5, tmp2, other=0.0) tmp4 = tl.load(in_ptr0 + x2, tmp2, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.load(in_ptr1 + (2 * x0 + 128 * x6 + 32768 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr1 + (1 + 2 * x0 + 128 * x6 + 32768 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp8 = tmp7 + tmp6 tmp9 = tl.load(in_ptr1 + (64 + 2 * x0 + 128 * x6 + 32768 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp10 = tmp9 + tmp8 tmp11 = tl.load(in_ptr1 + (65 + 2 * x0 + 128 * x6 + 32768 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp10 tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp5 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp2, tmp15, tmp16) tmp20 = tmp18 + tmp19 tmp21 = tl.where(tmp2, tmp17, tmp20) tmp22 = tl.where(tmp2, tmp21, tmp21) tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.01 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(in_out_ptr0 + x5, tmp27, None) @triton.jit def triton_poi_fused_avg_pool2d_reflection_pad2d_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20736 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex 
tmp0 = tl.load(in_ptr0 + (990 + -64 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (991 + -64 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1022 + -64 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1023 + -64 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 41472 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = xindex // 18 % 18 x4 = xindex // 324 x2 = xindex // 324 % 32 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 256 % 32 x5 = xindex x0 = xindex % 16 x3 = xindex // 8192 x6 = xindex // 16 % 512 tmp18 = tl.load(in_out_ptr0 + x5, None) tmp19 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 16, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_out_ptr0 + x5, tmp2, other=0.0) tmp4 = tl.load(in_ptr0 + x2, tmp2, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.load(in_ptr1 + (2 * x0 + 64 * x6 + 16384 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr1 + (1 + 2 * x0 + 64 * x6 + 16384 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp8 = tmp7 + tmp6 tmp9 = tl.load(in_ptr1 + (32 + 2 * x0 + 64 * x6 + 16384 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp10 = tmp9 + tmp8 tmp11 = tl.load(in_ptr1 + (33 + 2 * x0 + 64 * x6 + 16384 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp10 tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp5 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp2, tmp15, tmp16) tmp20 = tmp18 + tmp19 tmp21 = tl.where(tmp2, tmp17, tmp20) tmp22 = tl.where(tmp2, tmp21, tmp21) tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.01 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(in_out_ptr0 + x5, tmp27, None) @triton.jit def triton_poi_fused_avg_pool2d_reflection_pad2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x2 = xindex // 100 x3 = xindex tmp0 = tl.load(in_ptr0 + (238 + -32 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + 256 * x2), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (239 + -32 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + 256 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (254 + -32 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + 256 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (255 + -32 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + 256 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x4 = xindex // 100 x2 = xindex // 100 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 64 % 64 x5 = xindex x0 = xindex % 8 x3 = xindex // 4096 x6 = xindex // 8 % 512 tmp18 = tl.load(in_out_ptr0 + x5, None) tmp19 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 32, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_out_ptr0 + x5, tmp2, other=0.0) tmp4 = tl.load(in_ptr0 + x2, tmp2, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.load(in_ptr1 + (2 * x0 + 32 * x6 + 8192 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr1 + (1 + 2 * x0 + 32 * x6 + 8192 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp8 = tmp7 + tmp6 tmp9 = tl.load(in_ptr1 + (16 + 2 * x0 + 32 * x6 + 8192 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp10 = tmp9 + tmp8 tmp11 = tl.load(in_ptr1 + (17 + 2 * x0 + 32 * x6 + 8192 * x3), tmp2, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp10 tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp5 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp2, tmp15, tmp16) tmp20 = tmp18 + tmp19 tmp21 = tl.where(tmp2, tmp17, tmp20) tmp22 = tl.where(tmp2, tmp21, tmp21) tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.01 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(in_out_ptr0 + x5, tmp27, None) @triton.jit def triton_poi_fused_avg_pool2d_reflection_pad2d_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (54 + -16 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + 64 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (55 + -16 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + 64 * x2), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + -16 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + 64 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -16 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + -2 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + 64 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_leaky_relu_backward_14( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x2 = xindex // 16 % 64 x0 = xindex % 4 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (2 * x0 + 16 * x5), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (1 + 2 * x0 + 16 * x5), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (8 + 2 * x0 + 16 * x5), None, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (9 + 2 * x0 + 16 * x5), None, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp4 + tmp3 tmp7 = tmp6 + tmp5 tmp9 = tmp8 + tmp7 tmp10 = 0.25 tmp11 = tmp9 * tmp10 tmp12 = tmp2 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.01 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tmp17 > tmp13 tl.store(in_out_ptr0 + x4, tmp12, None) tl.store(out_ptr0 + x4, tmp18, None) @triton.jit def triton_poi_fused_arange_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_16(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_17(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = 
triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_clamp_mul_sub_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tmp10 = 0.0 tmp11 = tmp9 > tmp10 tmp12 = 0.01 tmp13 = tmp9 * tmp12 tmp14 = tl.where(tmp11, tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp17 = tmp15 < 0 tmp18 = tl.where(tmp17, tmp16, tmp15) tmp19 = tl.load(in_ptr2 + (tmp18 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tmp20 = tmp19 > tmp10 tmp21 = tmp19 * tmp12 tmp22 = tl.where(tmp20, tmp19, tmp21) tmp23 = tmp22 - tmp14 tmp25 = tmp23 * tmp24 tmp26 = tmp14 + tmp25 tl.store(out_ptr0 + x4, tmp26, None) @triton.jit def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 64 % 128 x3 = xindex // 8192 x4 = xindex % 64 x1 = xindex // 8 % 8 x0 = xindex % 8 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 64 * x2 + 4096 * x3), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tl.full([XBLOCK], 4, tl.int32) tmp8 = tmp6 + tmp7 tmp9 = tmp6 < 0 tmp10 = tl.where(tmp9, tmp8, tmp6) tmp11 = tl.load(in_ptr2 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp12 = tmp11 + tmp7 tmp13 = tmp11 < 0 tmp14 = tl.where(tmp13, tmp12, tmp11) tmp15 = tl.load(in_ptr3 + (tmp14 + 4 * tmp10 + 16 * x2 + 1024 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp16 = 0.0 tmp17 = tmp15 > tmp16 tmp18 = 0.01 tmp19 = tmp15 * tmp18 tmp20 = tl.where(tmp17, tmp15, tmp19) tmp21 = tl.load(in_ptr4 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp22 = tmp21 + tmp7 tmp23 = tmp21 < 0 tmp24 = tl.where(tmp23, tmp22, tmp21) tmp25 = tl.load(in_ptr3 + (tmp24 + 4 * tmp10 + 16 * x2 + 1024 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp26 = tmp25 > tmp16 tmp27 = tmp25 * tmp18 tmp28 = tl.where(tmp26, tmp25, tmp27) tmp29 = tmp28 - tmp20 tmp30 = tl.load(in_ptr5 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp31 
= tmp29 * tmp30 tmp32 = tmp20 + tmp31 tmp33 = tmp32 - tmp5 tmp34 = tl.load(in_ptr6 + x1, tmp4, eviction_policy='evict_last', other=0.0 ) tmp35 = tmp33 * tmp34 tmp36 = tmp5 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp4, tmp36, tmp37) tmp39 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp42 = tl.load(in_ptr7 + (x4 + 64 * (-64 + x2) + 4096 * x3), tmp39, other=0.0) tmp43 = tl.where(tmp4, tmp38, tmp42) tl.store(out_ptr0 + x5, tmp43, None) @triton.jit def triton_poi_fused_reflection_pad2d_21(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = xindex // 10 % 10 x2 = xindex // 100 x3 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x4 = xindex // 100 x2 = xindex // 100 % 32 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_arange_23(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 7, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) 
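# Reference sketch (illustrative, hand-written; not produced by the compiler):
# the arange / _to_copy / add_clamp / _to_copy_add_clamp_mul_sub helper kernels
# above precompute, for a bilinear upsample with align_corners=False, the low
# and high source indices and the fractional lerp weight for every output
# coordinate. In eager PyTorch the same three tensors could be derived roughly
# as follows (function name is hypothetical).
def _bilinear_upsample_indices_sketch(out_size, in_size):
    import torch
    i = torch.arange(out_size, dtype=torch.float32)
    # (i + 0.5) * scale - 0.5, clamped at 0 -> matches the *_to_copy kernels
    src = ((i + 0.5) * (in_size / out_size) - 0.5).clamp(min=0.0)
    lo = src.to(torch.int64)                           # floor of the source coordinate
    hi = (lo + 1).clamp(max=in_size - 1)               # matches the add_clamp kernels
    w = (src - lo.to(torch.float32)).clamp(0.0, 1.0)   # matches the mul_sub kernels
    return lo, hi, w
# The fused _unsafe_index_add_*_mul_sub kernels below then gather the four
# neighbours with (lo, hi) and blend them as v_lo + (v_hi - v_lo) * w, first
# along x and then along y, i.e. ordinary bilinear interpolation fused with the
# preceding convolution bias add and LeakyReLU.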
@triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x5 = xindex // 256 x2 = xindex // 256 % 32 x3 = xindex // 8192 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.load(in_ptr4 + (tmp8 + 8 * tmp4 + 64 * x2 + 8192 * x3), None, eviction_policy='evict_last') tmp13 = tmp11 + tmp12 tmp14 = 0.0 tmp15 = tmp13 > tmp14 tmp16 = 0.01 tmp17 = tmp13 * tmp16 tmp18 = tl.where(tmp15, tmp13, tmp17) tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 8 * tmp22 + 64 * x5), None, eviction_policy='evict_last') tmp24 = tmp23 + tmp10 tmp25 = tl.load(in_ptr4 + (tmp8 + 8 * tmp22 + 64 * x2 + 8192 * x3), None, eviction_policy='evict_last') tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp14 tmp28 = tmp26 * tmp16 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp33 + 8 * tmp22 + 64 * x5), None, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tl.load(in_ptr4 + (tmp33 + 8 * tmp22 + 64 * x2 + 8192 * x3), None, eviction_policy='evict_last') tmp37 = tmp35 + tmp36 tmp38 = tmp37 > tmp14 tmp39 = tmp37 * tmp16 tmp40 = tl.where(tmp38, tmp37, tmp39) tmp41 = tmp40 - tmp29 tmp43 = tmp41 * tmp42 tmp44 = tmp29 + tmp43 tmp45 = tl.load(in_ptr2 + (tmp33 + 8 * tmp4 + 64 * x5), None, eviction_policy='evict_last') tmp46 = tmp45 + tmp10 tmp47 = tl.load(in_ptr4 + (tmp33 + 8 * tmp4 + 64 * x2 + 8192 * x3), None, eviction_policy='evict_last') tmp48 = tmp46 + tmp47 tmp49 = tmp48 > tmp14 tmp50 = tmp48 * tmp16 tmp51 = tl.where(tmp49, tmp48, tmp50) tmp52 = tmp51 - tmp18 tmp53 = tmp52 * tmp42 tmp54 = tmp18 + tmp53 tmp55 = tmp54 - tmp44 tmp57 = tmp55 * tmp56 tl.store(in_out_ptr0 + x6, tmp44, None) tl.store(in_out_ptr1 + x6, tmp57, None) @triton.jit def triton_poi_fused_cat_reflection_pad2d_28(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 324 % 64 x0 = xindex % 18 x1 = xindex // 18 % 18 x3 = xindex // 20736 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2 + 8192 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + 
tl_math.abs(-1 + x1)) + 256 * x2 + 8192 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp13 = tl.load(in_ptr2 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * (-32 + x2) + 8192 * x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_29(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20736 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = xindex // 18 % 18 x4 = xindex // 324 x2 = xindex // 324 % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_30( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 x0 = xindex % 256 x2 = xindex // 4096 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = x1 tl.full([1], 0, tl.int64) tmp6 = tl.full([1], 32, tl.int64) tmp7 = tmp3 < tmp6 tmp8 = tl.load(in_ptr1 + (x0 + 256 * x1 + 8192 * x2), tmp7, other=0.0) tmp9 = tl.load(in_ptr2 + (x0 + 256 * x1 + 8192 * x2), tmp7, other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp7, tmp10, tmp11) tmp13 = tmp3 >= tmp6 tl.full([1], 64, tl.int64) tmp16 = tl.load(in_ptr3 + (x0 + 256 * (-32 + x1) + 8192 * x2), tmp13, other=0.0) tmp17 = tl.where(tmp7, tmp12, tmp16) tmp18 = tmp2 + tmp17 tmp19 = 0.0 tmp20 = tmp18 > tmp19 tmp21 = 0.01 tmp22 = tmp18 * tmp21 tmp23 = tl.where(tmp20, tmp18, tmp22) tmp24 = tmp23 > tmp19 tl.store(in_out_ptr0 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp24, None) @triton.jit def triton_poi_fused_arange_31(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_32(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_33(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 
= tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 15, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_clamp_mul_sub_34(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x2 = xindex // 1024 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x2), None, eviction_policy='evict_last') tmp10 = 0.0 tmp11 = tmp9 > tmp10 tmp12 = 0.01 tmp13 = tmp9 * tmp12 tmp14 = tl.where(tmp11, tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp17 = tmp15 < 0 tmp18 = tl.where(tmp17, tmp16, tmp15) tmp19 = tl.load(in_ptr2 + (tmp18 + 16 * tmp4 + 256 * x2), None, eviction_policy='evict_last') tmp20 = tmp19 > tmp10 tmp21 = tmp19 * tmp12 tmp22 = tl.where(tmp20, tmp19, tmp21) tmp23 = tmp22 - tmp14 tmp25 = tmp23 * tmp24 tmp26 = tmp14 + tmp25 tl.store(out_ptr0 + x4, tmp26, None) @triton.jit def triton_poi_fused_cat_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 1024 % 32 x3 = xindex // 32768 x4 = xindex % 1024 x1 = xindex // 32 % 32 x0 = xindex % 32 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 16, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 1024 * x2 + 16384 * x3), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tl.full([XBLOCK], 16, tl.int32) tmp8 = tmp6 + tmp7 tmp9 = tmp6 < 0 tmp10 = tl.where(tmp9, tmp8, tmp6) tmp11 = tl.load(in_ptr2 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp12 = tmp11 + tmp7 tmp13 = tmp11 < 0 tmp14 = tl.where(tmp13, tmp12, tmp11) tmp15 = tl.load(in_ptr3 + (tmp14 + 16 * tmp10 + 256 * x2 + 4096 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp16 = 0.0 tmp17 = tmp15 > tmp16 tmp18 = 0.01 tmp19 = tmp15 * tmp18 tmp20 = tl.where(tmp17, tmp15, tmp19) tmp21 = tl.load(in_ptr4 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp22 = tmp21 + tmp7 tmp23 = tmp21 < 0 tmp24 = tl.where(tmp23, tmp22, tmp21) tmp25 = tl.load(in_ptr3 + (tmp24 + 16 * tmp10 + 256 * x2 + 4096 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp26 = tmp25 > tmp16 tmp27 = tmp25 * tmp18 
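    # tmp28 below is the LeakyReLU'd neighbour sample at the high x index; the
    # following steps lerp it against tmp20 with the fractional x weight, blend
    # that row with the partially interpolated row held in in_ptr0 using the y
    # weight, and finally write either this upsampled decoder feature or the
    # encoder skip tensor (in_ptr7), depending on the output channel, i.e. a
    # channel-wise concatenation fused into the same kernel.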
tmp28 = tl.where(tmp26, tmp25, tmp27) tmp29 = tmp28 - tmp20 tmp30 = tl.load(in_ptr5 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp31 = tmp29 * tmp30 tmp32 = tmp20 + tmp31 tmp33 = tmp32 - tmp5 tmp34 = tl.load(in_ptr6 + x1, tmp4, eviction_policy='evict_last', other=0.0 ) tmp35 = tmp33 * tmp34 tmp36 = tmp5 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp4, tmp36, tmp37) tmp39 = tmp0 >= tmp3 tl.full([1], 32, tl.int64) tmp42 = tl.load(in_ptr7 + (x4 + 1024 * (-16 + x2) + 16384 * x3), tmp39, other=0.0) tmp43 = tl.where(tmp4, tmp38, tmp42) tl.store(out_ptr0 + x5, tmp43, None) @triton.jit def triton_poi_fused_reflection_pad2d_37(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 147968 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_38(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 36992 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 8 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_arange_39(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_40(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_41(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_clamp_mul_sub_42(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = 
tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_43( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x5 = xindex // 4096 x2 = xindex // 4096 % 8 x3 = xindex // 32768 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.load(in_ptr4 + (tmp8 + 32 * tmp4 + 1024 * x2 + 32768 * x3), None, eviction_policy='evict_last') tmp13 = tmp11 + tmp12 tmp14 = 0.0 tmp15 = tmp13 > tmp14 tmp16 = 0.01 tmp17 = tmp13 * tmp16 tmp18 = tl.where(tmp15, tmp13, tmp17) tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 32 * tmp22 + 1024 * x5), None, eviction_policy='evict_last') tmp24 = tmp23 + tmp10 tmp25 = tl.load(in_ptr4 + (tmp8 + 32 * tmp22 + 1024 * x2 + 32768 * x3), None, eviction_policy='evict_last') tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp14 tmp28 = tmp26 * tmp16 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp33 + 32 * tmp22 + 1024 * x5), None, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tl.load(in_ptr4 + (tmp33 + 32 * tmp22 + 1024 * x2 + 32768 * x3), None, eviction_policy='evict_last') tmp37 = tmp35 + tmp36 tmp38 = tmp37 > tmp14 tmp39 = tmp37 * tmp16 tmp40 = tl.where(tmp38, tmp37, tmp39) tmp41 = tmp40 - tmp29 tmp43 = tmp41 * tmp42 tmp44 = tmp29 + tmp43 tmp45 = tl.load(in_ptr2 + (tmp33 + 32 * tmp4 + 1024 * x5), None, eviction_policy='evict_last') tmp46 = tmp45 + tmp10 tmp47 = tl.load(in_ptr4 + (tmp33 + 32 * tmp4 + 1024 * x2 + 32768 * x3), None, eviction_policy='evict_last') tmp48 = tmp46 + tmp47 tmp49 = tmp48 > tmp14 tmp50 = tmp48 * tmp16 tmp51 = tl.where(tmp49, tmp48, tmp50) tmp52 = tmp51 - tmp18 tmp53 = tmp52 * tmp42 tmp54 = tmp18 + tmp53 tmp55 = tmp54 - tmp44 tmp57 = tmp55 * tmp56 tl.store(in_out_ptr0 + x6, tmp44, None) tl.store(in_out_ptr1 + x6, tmp57, None) @triton.jit def triton_poi_fused_cat_reflection_pad2d_44(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 278784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4356 % 16 x0 = xindex % 66 x1 = xindex // 66 % 66 x3 = xindex // 69696 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + 
tl_math.abs(-1 + x1)) + 4096 * x2 + 32768 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2 + 32768 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 16, tl.int64) tmp13 = tl.load(in_ptr2 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * (-8 + x2) + 32768 * x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_45(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 17424 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x2 = xindex // 4356 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.01 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_46( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 x1 = xindex // 4096 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp24 = tl.load(in_ptr4 + x2, None) tmp25 = tl.load(in_ptr5 + 0) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr6 + x2, None) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int64) tmp6 = tl.full([1], 8, tl.int64) tmp7 = tmp4 < tmp6 tmp8 = tl.load(in_ptr1 + (x0 + 4096 * 0 + 32768 * x1), tmp7, other=0.0) tmp9 = tl.load(in_ptr2 + (x0 + 4096 * 0 + 32768 * x1), tmp7, other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp7, tmp10, tmp11) tmp13 = tmp4 >= tmp6 tl.full([1], 16, tl.int64) tmp16 = tl.load(in_ptr3 + (x0 + 4096 * -8 + 32768 * x1), tmp13, other=0.0) tmp17 = tl.where(tmp7, tmp12, tmp16) tmp18 = tmp3 + tmp17 tmp19 = 0.0 tmp20 = tmp18 > tmp19 tmp21 = 0.01 tmp22 = tmp18 * tmp21 tmp23 = tl.where(tmp20, tmp18, tmp22) tmp27 = tmp24 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tmp29 > tmp19 tmp31 = tmp29 * tmp21 tmp32 = tl.where(tmp30, tmp29, tmp31) tmp33 = tmp23 + tmp32 tmp34 = tmp32 > tmp19 tmp35 = tmp23 > tmp19 tl.store(out_ptr0 + x2, tmp33, None) tl.store(out_ptr1 + x2, tmp34, None) tl.store(out_ptr2 + x2, tmp35, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_47(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.01 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tmp9 = tmp8 > tmp4 
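    # tmp9 marks where the bias-added convolution output stayed positive, i.e.
    # where LeakyReLU acted as the identity; only this boolean mask is stored,
    # since it is all the LeakyReLU backward pass needs.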
tl.store(out_ptr0 + x0, tmp9, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_48(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 8 x2 = xindex // 8192 x4 = xindex % 8192 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 32768 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_49(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 8 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_50(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_51(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 32 x2 = xindex // 2048 x4 = xindex % 2048 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 8192 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_52(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_53(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) 
tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_54(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_55(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_56(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_57(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 8 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (8, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_13, (32,), (1,)) assert_size_stride(primals_14, (64, 32, 3, 3), (288, 9, 3, 1)) 
assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_21, (64,), (1,)) assert_size_stride(primals_22, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (32,), (1,)) assert_size_stride(primals_24, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_25, (32,), (1,)) assert_size_stride(primals_26, (16, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_27, (16,), (1,)) assert_size_stride(primals_28, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_29, (16,), (1,)) assert_size_stride(primals_30, (8, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_31, (8,), (1,)) assert_size_stride(primals_32, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_33, (8,), (1,)) assert_size_stride(primals_34, (1, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_35, (1,), (1,)) assert_size_stride(primals_36, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_37, (1,), (1,)) assert_size_stride(primals_38, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_39, (1,), (1,)) assert_size_stride(primals_40, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_41, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 66, 66), (4356, 4356, 66, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(17424)](primals_1, buf0, 17424, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf2 = empty_strided_cuda((4, 8, 66, 66), (34848, 4356, 66, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1[grid(139392) ](buf1, primals_3, buf2, 139392, XBLOCK=512, num_warps=8, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_convolution_leaky_relu_2[grid(131072)](buf4, primals_5, primals_1, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 8, 34, 34), (9248, 1156, 34, 1), torch.float32) triton_poi_fused_avg_pool2d_reflection_pad2d_3[grid(36992)](buf4, buf5, 36992, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf7 = empty_strided_cuda((4, 16, 34, 34), (18496, 1156, 34, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_4[grid(73984) ](buf6, primals_7, buf7, 73984, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf9 = buf8 del buf8 buf10 = buf9 del buf9 triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_5[grid(65536)]( buf10, primals_9, buf4, 65536, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_9 buf11 = empty_strided_cuda((4, 16, 18, 18), (5184, 324, 18, 1), torch.float32) triton_poi_fused_avg_pool2d_reflection_pad2d_6[grid(20736)](buf10, buf11, 20736, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 32, 16, 16), (8192, 256, 16, 1)) buf13 = empty_strided_cuda((4, 32, 18, 18), (10368, 324, 18, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_7[grid(41472) ](buf12, primals_11, buf13, 41472, XBLOCK=256, num_warps=4, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 32, 16, 16), (8192, 256, 16, 1)) buf15 = buf14 del buf14 buf16 = buf15 del buf15 triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_8[grid(32768)]( buf16, primals_13, buf10, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf17 = empty_strided_cuda((4, 32, 10, 10), (3200, 100, 10, 1), torch.float32) triton_poi_fused_avg_pool2d_reflection_pad2d_9[grid(12800)](buf16, buf17, 12800, XBLOCK=128, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 8, 8), (4096, 64, 8, 1)) buf19 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_10[grid(25600) ](buf18, primals_15, buf19, 25600, XBLOCK=256, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 8, 8), (4096, 64, 8, 1)) buf21 = buf20 del buf20 buf22 = buf21 del buf21 triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_11[grid(16384)]( buf22, primals_17, buf16, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf23 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch. float32) triton_poi_fused_avg_pool2d_reflection_pad2d_12[grid(9216)](buf22, buf23, 9216, XBLOCK=128, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 4, 4), (1024, 16, 4, 1)) buf25 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch. 
float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_13[grid(9216) ](buf24, primals_19, buf25, 9216, XBLOCK=256, num_warps=4, num_stages=1) buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 4, 4), (1024, 16, 4, 1)) buf27 = buf26 del buf26 buf100 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool ) triton_poi_fused_add_avg_pool2d_convolution_leaky_relu_leaky_relu_backward_14[ grid(4096)](buf27, primals_21, buf22, buf100, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf28 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_arange_15[grid(8)](buf28, 8, XBLOCK=8, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_16[grid(8)](buf29, 8, XBLOCK=8, num_warps =1, num_stages=1) buf30 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_17[grid(8)](buf30, 8, XBLOCK=8, num_warps=1, num_stages=1) buf31 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_16[grid(8)](buf31, 8, XBLOCK=8, num_warps =1, num_stages=1) buf32 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_add_clamp_17[grid(8)](buf32, 8, XBLOCK=8, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((8,), (1,), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_18[grid(8)](buf33, 8, XBLOCK=8, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch. float32) triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_19[grid(16384)]( buf29, buf31, buf27, buf32, buf33, buf34, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf35 = empty_strided_cuda((8, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_18[grid(8)](buf35, 8, XBLOCK=8, num_warps=1, num_stages=1) buf36 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_cat_20[grid(32768)](buf34, buf30, buf31, buf27, buf32, buf33, buf35, buf22, buf36, 32768, XBLOCK=256, num_warps =4, num_stages=1) del buf27 del buf34 buf37 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32) triton_poi_fused_reflection_pad2d_21[grid(51200)](buf36, buf37, 51200, XBLOCK=512, num_warps=4, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 32, 8, 8), (2048, 64, 8, 1)) buf39 = empty_strided_cuda((4, 32, 10, 10), (3200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_22[grid(12800) ](buf38, primals_23, buf39, 12800, XBLOCK=256, num_warps=4, num_stages=1) buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 32, 8, 8), (2048, 64, 8, 1)) buf41 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused_arange_23[grid(16)](buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf42, 16, XBLOCK=16, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf43, 16, XBLOCK=16, num_warps=1, num_stages=1) buf44 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf44, 16, XBLOCK=16, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf45, 16, XBLOCK=16, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_26[grid(16)](buf48, 16, XBLOCK=16, num_warps=1, num_stages=1) buf50 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_26[grid(16)](buf50, 16, XBLOCK=16, num_warps=1, num_stages=1) buf47 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf46 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf49 = buf46 del buf46 buf51 = buf47 del buf47 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[ grid(32768)](buf49, buf51, buf43, buf44, buf40, primals_25, buf36, buf42, buf45, buf48, buf50, 32768, XBLOCK=256, num_warps =4, num_stages=1) buf52 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32) triton_poi_fused_cat_reflection_pad2d_28[grid(82944)](buf49, buf51, buf16, buf52, 82944, XBLOCK=512, num_warps=8, num_stages=1) buf53 = extern_kernels.convolution(buf52, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 16, 16, 16), (4096, 256, 16, 1)) buf54 = empty_strided_cuda((4, 16, 18, 18), (5184, 324, 18, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_29[grid(20736) ](buf53, primals_27, buf54, 20736, XBLOCK=256, num_warps=4, num_stages=1) buf55 = extern_kernels.convolution(buf54, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 16, 16, 16), (4096, 256, 16, 1)) buf56 = buf55 del buf55 buf96 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_30[grid 
(16384)](buf56, primals_29, buf49, buf51, buf16, buf96, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf49 del buf51 del primals_29 buf57 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_arange_31[grid(32)](buf57, 32, XBLOCK=32, num_warps=1, num_stages=1) buf58 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_32[grid(32)](buf58, 32, XBLOCK=32, num_warps=1, num_stages=1) buf59 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_33[grid(32)](buf59, 32, XBLOCK=32, num_warps=1, num_stages=1) buf60 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_32[grid(32)](buf60, 32, XBLOCK=32, num_warps=1, num_stages=1) buf61 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_33[grid(32)](buf61, 32, XBLOCK=32, num_warps=1, num_stages=1) buf62 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_34[grid(32)](buf62, 32, XBLOCK=32, num_warps=1, num_stages=1) buf63 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) triton_poi_fused__unsafe_index_add_leaky_relu_mul_sub_35[grid(65536)]( buf58, buf60, buf56, buf61, buf62, buf63, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf64 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_34[grid(32)](buf64, 32, XBLOCK=32, num_warps=1, num_stages=1) buf65 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) triton_poi_fused_cat_36[grid(131072)](buf63, buf59, buf60, buf56, buf61, buf62, buf64, buf10, buf65, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf63 buf66 = empty_strided_cuda((4, 32, 34, 34), (36992, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_37[grid(147968)](buf65, buf66, 147968, XBLOCK=512, num_warps=8, num_stages=1) buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 8, 32, 32), (8192, 1024, 32, 1)) buf68 = empty_strided_cuda((4, 8, 34, 34), (9248, 1156, 34, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_38[grid(36992) ](buf67, primals_31, buf68, 36992, XBLOCK=512, num_warps=4, num_stages=1) buf69 = extern_kernels.convolution(buf68, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf69, (4, 8, 32, 32), (8192, 1024, 32, 1)) buf70 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_39[grid(64)](buf70, 64, XBLOCK=64, num_warps=1, num_stages=1) buf71 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_40[grid(64)](buf71, 64, XBLOCK=64, num_warps=1, num_stages=1) buf72 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_41[grid(64)](buf72, 64, XBLOCK=64, num_warps=1, num_stages=1) buf73 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_40[grid(64)](buf73, 64, XBLOCK=64, num_warps=1, num_stages=1) buf74 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_41[grid(64)](buf74, 64, XBLOCK=64, num_warps=1, num_stages=1) buf77 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_42[grid(64)](buf77, 64, XBLOCK=64, num_warps=1, num_stages=1) buf79 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_clamp_mul_sub_42[grid(64)](buf79, 64, XBLOCK=64, num_warps=1, 
num_stages=1) buf76 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32) buf75 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32) buf78 = buf75 del buf75 buf80 = buf76 del buf76 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_43[ grid(131072)](buf78, buf80, buf72, buf73, buf69, primals_33, buf65, buf71, buf74, buf77, buf79, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf81 = empty_strided_cuda((4, 16, 66, 66), (69696, 4356, 66, 1), torch.float32) triton_poi_fused_cat_reflection_pad2d_44[grid(278784)](buf78, buf80, buf4, buf81, 278784, XBLOCK=512, num_warps=8, num_stages=1) buf82 = extern_kernels.convolution(buf81, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf82, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf83 = empty_strided_cuda((4, 1, 66, 66), (4356, 4356, 66, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_45[grid(17424) ](buf82, primals_35, buf83, 17424, XBLOCK=128, num_warps=4, num_stages=1) buf84 = extern_kernels.convolution(buf83, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf86 = extern_kernels.convolution(buf0, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf87 = empty_strided_cuda((4, 1, 66, 66), (4356, 4356, 66, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_45[grid(17424) ](buf86, primals_39, buf87, 17424, XBLOCK=128, num_warps=4, num_stages=1) buf88 = extern_kernels.convolution(buf87, primals_40, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf88, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf85 = reinterpret_tensor(buf84, (4, 1, 64, 64), (4096, 16384, 64, 1), 0) del buf84 buf89 = reinterpret_tensor(buf56, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf56 buf90 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) buf92 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_46[grid (16384)](buf85, primals_37, buf78, buf80, buf4, buf88, primals_41, primals_1, buf89, buf90, buf92, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf78 del buf80 del buf85 del buf88 del primals_1 del primals_37 del primals_41 buf91 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_47[grid (16384)](buf86, primals_39, buf91, 16384, XBLOCK=256, num_warps =4, num_stages=1) del buf86 del primals_39 buf93 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_47[grid (16384)](buf82, primals_35, buf93, 16384, XBLOCK=256, num_warps =4, num_stages=1) del buf82 del primals_35 buf94 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_48[grid (32768)](buf69, primals_33, buf65, buf94, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf65 del buf69 del primals_33 buf95 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_49[grid 
(32768)](buf67, primals_31, buf95, 32768, XBLOCK=128, num_warps =4, num_stages=1) del buf67 del primals_31 buf97 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_50[grid (16384)](buf53, primals_27, buf97, 16384, XBLOCK=256, num_warps =4, num_stages=1) del buf53 del primals_27 buf98 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_51[grid (8192)](buf40, primals_25, buf36, buf98, 8192, XBLOCK=128, num_warps=4, num_stages=1) del buf36 del buf40 del primals_25 buf99 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_52[grid (8192)](buf38, primals_23, buf99, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf38 del primals_23 buf101 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_53[grid (4096)](buf24, primals_19, buf101, 4096, XBLOCK=128, num_warps= 4, num_stages=1) del buf24 del primals_19 buf102 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_54[grid (16384)](buf18, primals_15, buf102, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf18 del primals_15 buf103 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_55[grid (32768)](buf12, primals_11, buf103, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del primals_11 buf104 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_56[grid (65536)](buf6, primals_7, buf104, 65536, XBLOCK=256, num_warps= 4, num_stages=1) del buf6 del primals_7 buf105 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_57[grid (131072)](buf1, primals_3, buf105, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf1 del primals_3 return (buf89, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, buf0, buf2, buf4, buf5, buf7, buf10, buf11, buf13, buf16, buf17, buf19, buf22, buf23, buf25, buf28, buf29, buf30, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf42, buf43, buf44, buf45, buf48, buf50, buf52, buf54, buf57, buf58, buf59, buf60, buf61, buf62, buf64, buf66, buf68, buf70, buf71, buf72, buf73, buf74, buf77, buf79, buf81, buf83, buf87, buf90, buf91, buf92, buf93, buf94, buf95, buf96, buf97, buf98, buf99, buf100, buf101, buf102, buf103, buf104, buf105) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm=None, residual=True, activation='leakyrelu', in_place_activation=True, transpose=False, reflectpad=True): super(ConvBlock, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose self.reflectpad = reflectpad if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm is not None: if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = 
nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=0 if self.reflectpad else 1) if self.activation == 'relu': self.actfun1 = nn.ReLU(inplace=in_place_activation) self.actfun2 = nn.ReLU(inplace=in_place_activation) elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU(inplace=in_place_activation) self.actfun2 = nn.LeakyReLU(inplace=in_place_activation) elif self.activation == 'elu': self.actfun1 = nn.ELU(inplace=in_place_activation) self.actfun2 = nn.ELU(inplace=in_place_activation) elif self.activation == 'selu': self.actfun1 = nn.SELU(inplace=in_place_activation) self.actfun2 = nn.SELU(inplace=in_place_activation) if self.reflectpad: self.rpad1 = nn.ReflectionPad2d(1) self.rpad2 = nn.ReflectionPad2d(1) def forward(self, x): ox = x if self.reflectpad: x = self.rpad1(x) x = self.conv1(x) if self.dropout: x = self.dropout1(x) x = self.actfun1(x) if self.norm1: x = self.norm1(x) if self.reflectpad: x = self.rpad2(x) x = self.conv2(x) if self.dropout: x = self.dropout2(x) if self.residual: x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox. shape[1], x.shape[1]), :, :] x = self.actfun2(x) if self.norm2: x = self.norm2(x) return x class UnetNew(nn.Module): def __init__(self, n_channel_in=1, n_channel_out=1, n_internal_channels =8, residual=True, down='avgpool', up='bilinear', activation= 'leakyrelu', norm=None, softmax=False): super(UnetNew, self).__init__() self.residual = residual self.softmax = softmax nic = n_internal_channels if down == 'maxpool': self.down1 = nn.MaxPool2d(kernel_size=2) self.down2 = nn.MaxPool2d(kernel_size=2) self.down3 = nn.MaxPool2d(kernel_size=2) self.down4 = nn.MaxPool2d(kernel_size=2) elif down == 'avgpool': self.down1 = nn.AvgPool2d(kernel_size=2) self.down2 = nn.AvgPool2d(kernel_size=2) self.down3 = nn.AvgPool2d(kernel_size=2) self.down4 = nn.AvgPool2d(kernel_size=2) elif down == 'convpool': self.down1 = nn.Conv2d(nic, nic, kernel_size=2, stride=2, groups=32 ) self.down2 = nn.Conv2d(nic * 2, nic * 2, kernel_size=2, stride= 2, groups=64) self.down3 = nn.Conv2d(nic * 4, nic * 4, kernel_size=2, stride= 2, groups=128) self.down4 = nn.Conv2d(nic * 8, nic * 8, kernel_size=2, stride= 2, groups=256) self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25 self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25 self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25 self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25 self.down1.bias.data = 0.01 * self.down1.bias.data + 0 self.down2.bias.data = 0.01 * self.down2.bias.data + 0 self.down3.bias.data = 0.01 * self.down3.bias.data + 0 self.down4.bias.data = 0.01 * self.down4.bias.data + 0 if up == 'bilinear' or up == 'nearest': self.up1 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) self.up2 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) self.up3 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) self.up4 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2, align_corners=False) elif up == 'tconv': self.up1 = nn.ConvTranspose2d(nic * 8, nic * 8, 
kernel_size=2, stride=2, groups=nic * 8) self.up2 = nn.ConvTranspose2d(nic * 4, nic * 4, kernel_size=2, stride=2, groups=nic * 4) self.up3 = nn.ConvTranspose2d(nic * 2, nic * 2, kernel_size=2, stride=2, groups=nic * 2) self.up4 = nn.ConvTranspose2d(nic, nic, kernel_size=2, stride=2, groups=nic) self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25 self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25 self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25 self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25 self.up1.bias.data = 0.01 * self.up1.bias.data + 0 self.up2.bias.data = 0.01 * self.up2.bias.data + 0 self.up3.bias.data = 0.01 * self.up3.bias.data + 0 self.up4.bias.data = 0.01 * self.up4.bias.data + 0 self.conv1 = ConvBlock(n_channel_in, nic, residual=residual, activation=activation, norm=norm) self.conv2 = ConvBlock(nic, nic * 2, residual=residual, activation= activation, norm=norm) self.conv3 = ConvBlock(nic * 2, nic * 4, residual=residual, activation=activation, norm=norm) self.conv4 = ConvBlock(nic * 4, nic * 8, residual=residual, activation=activation, norm=norm) self.conv5 = ConvBlock(nic * 8, nic * 8, residual=residual, activation=activation, norm=norm) self.conv6 = ConvBlock(2 * nic * 8, nic * 4, residual=residual, activation=activation, norm=norm) self.conv7 = ConvBlock(2 * nic * 4, nic * 2, residual=residual, activation=activation, norm=norm) self.conv8 = ConvBlock(2 * nic * 2, nic, residual=residual, activation=activation, norm=norm) self.conv9 = ConvBlock(2 * nic, n_channel_out, residual=residual, activation=activation, norm=norm) if self.residual: self.convres = ConvBlock(n_channel_in, n_channel_out, residual= residual, activation=activation, norm=norm) def forward(self, input_0): primals_2 = self.conv1.conv1.weight primals_3 = self.conv1.conv1.bias primals_4 = self.conv1.conv2.weight primals_5 = self.conv1.conv2.bias primals_6 = self.conv2.conv1.weight primals_7 = self.conv2.conv1.bias primals_8 = self.conv2.conv2.weight primals_9 = self.conv2.conv2.bias primals_10 = self.conv3.conv1.weight primals_11 = self.conv3.conv1.bias primals_12 = self.conv3.conv2.weight primals_13 = self.conv3.conv2.bias primals_14 = self.conv4.conv1.weight primals_15 = self.conv4.conv1.bias primals_16 = self.conv4.conv2.weight primals_17 = self.conv4.conv2.bias primals_18 = self.conv5.conv1.weight primals_19 = self.conv5.conv1.bias primals_20 = self.conv5.conv2.weight primals_21 = self.conv5.conv2.bias primals_22 = self.conv6.conv1.weight primals_23 = self.conv6.conv1.bias primals_24 = self.conv6.conv2.weight primals_25 = self.conv6.conv2.bias primals_26 = self.conv7.conv1.weight primals_27 = self.conv7.conv1.bias primals_28 = self.conv7.conv2.weight primals_29 = self.conv7.conv2.bias primals_30 = self.conv8.conv1.weight primals_31 = self.conv8.conv1.bias primals_32 = self.conv8.conv2.weight primals_33 = self.conv8.conv2.bias primals_34 = self.conv9.conv1.weight primals_35 = self.conv9.conv1.bias primals_36 = self.conv9.conv2.weight primals_37 = self.conv9.conv2.bias primals_38 = self.convres.conv1.weight primals_39 = self.convres.conv1.bias primals_40 = self.convres.conv2.weight primals_41 = self.convres.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, 
primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41]) return output[0]
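UnetNew.forward returns only output[0] of call(); the long tuple above additionally carries the weights, the intermediate activations and the boolean leaky-relu masks that inductor saves for the backward pass. A minimal usage sketch, assuming the default constructor (n_channel_in=1, n_internal_channels=8) and a 64x64 single-channel batch, which is consistent with the buffer sizes in the kernels above but is not taken from the repository.

import torch

if torch.cuda.is_available():
    net = UnetNew().cuda()
    y = net(torch.rand(4, 1, 64, 64, device='cuda'))   # output[0] of call(), i.e. the network output
    print(y.shape)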
royerloic/aydin
Unet
false
16515
[ "BSD-3-Clause" ]
78
f9c61a24030891d008c318b250da5faec69fcd7d
https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d
PatchMerging
import torch import torch.nn as nn from torch import optim as optim class PatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Conv2d(4 * dim, 2 * dim, 1, 1, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ x: B, H*W, C """ B, C, H, W = x.shape assert H % 2 == 0 and W % 2 == 0, f'x size ({H}*{W}) are not even.' x = x.view(B, C, H, W) x0 = x[:, :, 0::2, 0::2] x1 = x[:, :, 1::2, 0::2] x2 = x[:, :, 0::2, 1::2] x3 = x[:, :, 1::2, 1::2] x = torch.cat([x0, x1, x2, x3], 1) x = self.norm(x) x = self.reduction(x) return x def extra_repr(self) ->str: return f'input_resolution={self.input_resolution}, dim={self.dim}' def flops(self): H, W = self.input_resolution flops = H * W * self.dim flops += H // 2 * (W // 2) * 4 * self.dim * 2 * self.dim return flops def get_inputs(): return [torch.rand([4, 4, 32, 32])] def get_init_inputs(): return [[], {'input_resolution': 4, 'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 1024 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x1 = xindex // 16 % 16 r3 = rindex x0 = xindex % 16 x2 = xindex // 256 x4 = xindex tmp46 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp0 = x1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * r3 + 64 * x0 + 1024 * x1 + 4096 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (32 + 2 * r3 + 64 * x0 + 1024 * (-4 + x1) + 4096 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (1 + 2 * r3 + 64 * x0 + 1024 * (-8 + x1) + 4096 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (33 + 2 * r3 + 64 * x0 + 1024 * (-12 + x1) + 4096 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r3 + 16 * x4), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp43, xmask) tl.store(out_ptr2 + (r3 + 16 * x4), tmp49, xmask) tl.store(out_ptr1 + x4, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 32, 32), (4096, 1024, 32, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (8, 16, 1, 1), (16, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 16, 16, 1), (256, 16, 1, 1), torch. 
float32) buf2 = empty_strided_cuda((4, 16, 16, 1), (256, 16, 1, 1024), torch .float32) buf4 = reinterpret_tensor(buf2, (4, 16, 16, 1), (256, 16, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0[grid(1024)](buf4, primals_1, primals_2, primals_3, buf0, buf1, buf5, 1024, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 16, 16), (2048, 256, 16, 1)) return buf6, primals_4, buf0, buf1, buf4, buf5 class PatchMergingNew(nn.Module): """ Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Conv2d(4 * dim, 2 * dim, 1, 1, bias=False) self.norm = norm_layer(4 * dim) def extra_repr(self) ->str: return f'input_resolution={self.input_resolution}, dim={self.dim}' def flops(self): H, W = self.input_resolution flops = H * W * self.dim flops += H // 2 * (W // 2) * 4 * self.dim * 2 * self.dim return flops def forward(self, input_0): primals_4 = self.reduction.weight primals_2 = self.norm.weight primals_3 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
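A minimal sketch of how the compiled module can be sanity-checked against the eager PatchMerging above, assuming a CUDA device; the shared state_dict, the tolerances and the input shape (taken from get_inputs()) are assumptions, not part of the dataset entry.

import torch

if torch.cuda.is_available():
    ref = PatchMerging(input_resolution=4, dim=4).cuda()
    opt = PatchMergingNew(input_resolution=4, dim=4).cuda()
    opt.load_state_dict(ref.state_dict())          # same LayerNorm and 1x1-conv weights in both modules
    x = torch.rand([4, 4, 32, 32], device='cuda')
    torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-4)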
svip-lab/AS-MLP
PatchMerging
false
16516
[ "MIT" ]
66
5f360348583b3cac8663a392c9588b6f7e2f46b8
https://github.com/svip-lab/AS-MLP/tree/5f360348583b3cac8663a392c9588b6f7e2f46b8
upconv
import torch import torch.nn as nn from torch.nn import functional as F import torch.utils.data.distributed class upconv(nn.Module): def __init__(self, in_channels, out_channels, ratio=2): super(upconv, self).__init__() self.elu = nn.ELU() self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, bias=False, kernel_size=3, stride=1, padding=1) self.ratio = ratio def forward(self, x): up_x = F.interpolate(x, scale_factor=self.ratio, mode='nearest') out = self.conv(up_x) out = self.elu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) triton_poi_fused_elu_1[grid(1024)](buf1, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_2, buf0, buf1 class upconvNew(nn.Module): def __init__(self, in_channels, out_channels, ratio=2): super(upconvNew, self).__init__() self.elu = nn.ELU() self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, bias=False, kernel_size=3, stride=1, padding=1) self.ratio = ratio def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
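The first kernel above, triton_poi_fused__unsafe_index_0, performs the 2x nearest-neighbour upsample purely by index arithmetic, mapping every output coordinate back to floor(coord * 0.5). A small eager illustration of that mapping, assuming the get_inputs() shape; the advanced-indexing formulation is a sketch, not code from the repository.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
idx = (torch.arange(8, dtype=torch.float32) * 0.5).long()   # 0,0,1,1,2,2,3,3 - same arithmetic as the kernel
manual = x[:, :, idx, :][:, :, :, idx]                      # gather rows, then columns
assert torch.equal(manual, F.interpolate(x, scale_factor=2, mode='nearest'))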
syKevinPeng/TransDepth
upconv
false
16517
[ "MIT" ]
118
2282039da7bc0812e19a27b2d73a25bdef97d739
https://github.com/syKevinPeng/TransDepth/tree/2282039da7bc0812e19a27b2d73a25bdef97d739
UpsamplingLinear1d
import torch import torch.nn.functional as F import torch.nn as nn class UpsamplingLinear1d(nn.Module): def __init__(self, scale_factor=2.0): super().__init__() self.scale_factor = scale_factor def forward(self, x): return F.interpolate(x, scale_factor=self.scale_factor, mode= 'linear', align_corners=True) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.42857142857142855 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.load(in_ptr0 + (tmp6 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp8 = tl.full([1], 1, tl.int64) tmp9 = tmp6 + tmp8 tmp10 = tl.full([1], 3, tl.int64) tmp11 = triton_helpers.minimum(tmp9, tmp10) tmp12 = tl.load(in_ptr0 + (tmp11 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp13 = tmp12 - tmp7 tmp14 = tmp6.to(tl.float32) tmp15 = tmp5 - tmp14 tmp16 = triton_helpers.maximum(tmp15, tmp4) tmp17 = 1.0 tmp18 = triton_helpers.minimum(tmp16, tmp17) tmp19 = tmp13 * tmp18 tmp20 = tmp7 + tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class UpsamplingLinear1dNew(nn.Module): def __init__(self, scale_factor=2.0): super().__init__() self.scale_factor = scale_factor def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
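With align_corners=True the source coordinate is dst * (in_len - 1) / (out_len - 1), which for a length-4 input upsampled to 8 is the 3/7 constant (0.42857142857142855) hard-coded in the kernel above. A rough eager re-derivation of the same interpolation, with the shape from get_inputs() and an assumed tolerance; it is a sketch, not repository code.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)
out = F.interpolate(x, scale_factor=2.0, mode='linear', align_corners=True)
src = torch.arange(8, dtype=torch.float32) * (4 - 1) / (8 - 1)   # fractional source positions
lo = src.floor().long()
hi = (lo + 1).clamp(max=3)
w = (src - lo.float()).clamp(0.0, 1.0)
manual = x[..., lo] * (1 - w) + x[..., hi] * w
assert torch.allclose(manual, out, atol=1e-6)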
tailintalent/ar-pde-cnn
UpsamplingLinear1d
false
16518
[ "MIT" ]
51
88c130d7296af4ef7c13ec28a287fec4af3639f7
https://github.com/tailintalent/ar-pde-cnn/tree/88c130d7296af4ef7c13ec28a287fec4af3639f7
NonLocal2d
import torch import torch.nn as nn import torch.nn.functional as F from torchvision.transforms import functional as F from torch.nn import functional as F import torch.utils.data class NonLocal2d(nn.Module): def __init__(self, dim_in, dim_inner, dim_out, max_pool_stride=2, use_maxpool=True, use_gn=False, use_scale=True): super().__init__() self.dim_inner = dim_inner self.use_maxpool = use_maxpool self.use_gn = use_gn self.use_scale = use_scale self.theta = nn.Conv2d(dim_in, dim_inner, 1, stride=1, padding=0) if self.use_maxpool: self.pool = nn.MaxPool2d(kernel_size=max_pool_stride, stride= max_pool_stride, padding=0) self.phi = nn.Conv2d(dim_in, dim_inner, 1, stride=1, padding=0) self.g = nn.Conv2d(dim_in, dim_inner, 1, stride=1, padding=0) self.out = nn.Conv2d(dim_inner, dim_out, 1, stride=1, padding=0) if self.use_gn: self.gn = nn.GroupNorm(32, dim_out, eps=1e-05) self._init_weights() def _init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.GroupNorm): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, x): batch_size = x.size(0) theta_x = self.theta(x).view(batch_size, self.dim_inner, -1) theta_x = theta_x.permute(0, 2, 1) if self.use_maxpool: pool_x = self.pool(x) else: pool_x = x phi_x = self.phi(pool_x).view(batch_size, self.dim_inner, -1) g_x = self.g(pool_x).view(batch_size, self.dim_inner, -1) theta_phi = torch.matmul(theta_x, phi_x) if self.use_scale: theta_phi_sc = theta_phi * self.dim_inner ** -0.5 else: theta_phi_sc = theta_phi p_x = F.softmax(theta_phi_sc, dim=-1) p_x = p_x.permute(0, 2, 1) t_x = torch.matmul(g_x, p_x) t_x = t_x.view(batch_size, self.dim_inner, *x.size()[2:]) y = self.out(t_x) if self.use_gn: y = self.gn(y) return y + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_inner': 4, 'dim_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](primals_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 2, 2), (16, 4, 2, 1)) buf4 = buf0 del buf0 triton_poi_fused_convolution_1[grid(256)](buf4, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf5 = buf2 del buf2 triton_poi_fused_convolution_2[grid(64)](buf5, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused__softmax_4[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf9, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf7, (4, 4, 16), (64, 16, 1), 0) del buf7 extern_kernels.bmm(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (4, 4, 16), (64, 1, 4), 0), out=buf10) buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = buf11 del buf11 triton_poi_fused_add_convolution_5[grid(256)](buf12, primals_9, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return (buf12, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0)) class NonLocal2dNew(nn.Module): def __init__(self, dim_in, dim_inner, dim_out, max_pool_stride=2, use_maxpool=True, use_gn=False, use_scale=True): super().__init__() self.dim_inner = dim_inner self.use_maxpool = use_maxpool self.use_gn = use_gn self.use_scale = use_scale self.theta = nn.Conv2d(dim_in, dim_inner, 1, stride=1, padding=0) if self.use_maxpool: self.pool = nn.MaxPool2d(kernel_size=max_pool_stride, stride= max_pool_stride, padding=0) self.phi = nn.Conv2d(dim_in, dim_inner, 1, stride=1, padding=0) self.g = nn.Conv2d(dim_in, dim_inner, 1, stride=1, padding=0) self.out = nn.Conv2d(dim_inner, dim_out, 1, stride=1, padding=0) if self.use_gn: self.gn = nn.GroupNorm(32, dim_out, eps=1e-05) self._init_weights() def _init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.GroupNorm): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, input_0): primals_2 = self.theta.weight primals_3 = self.theta.bias primals_4 = self.phi.weight primals_5 = self.phi.bias primals_6 = self.g.weight primals_7 = self.g.bias primals_8 = self.out.weight primals_9 = self.out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
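The two softmax kernels above fold the use_scale factor dim_inner ** -0.5 = 0.5 into the exponent after the max subtraction; that is valid because for any s > 0, softmax(s * x) = exp(s * (x - max(x))) / sum(exp(s * (x - max(x)))). A quick eager check of that identity with arbitrary shapes; nothing below is taken from the repository.

import torch

x = torch.randn(4, 16, 4)
s = 4 ** -0.5                                      # dim_inner ** -0.5 with dim_inner = 4
fused = torch.exp((x - x.max(dim=-1, keepdim=True).values) * s)
fused = fused / fused.sum(dim=-1, keepdim=True)
assert torch.allclose(fused, torch.softmax(x * s, dim=-1), atol=1e-6)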
shunya-toyokawa/qanet_human_parts_segmentatiom
NonLocal2d
false
16519
[ "MIT" ]
72
5527b247acd65534b455c26e3692a14b31669602
https://github.com/shunya-toyokawa/qanet_human_parts_segmentatiom/tree/5527b247acd65534b455c26e3692a14b31669602
BinaryTreeComposer
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class BinaryTreeComposer(nn.Module): """ local lc, lh = nn.Identity()(), nn.Identity()() local rc, rh = nn.Identity()(), nn.Identity()() local new_gate = function() return nn.CAddTable(){ nn.Linear(self.mem_dim, self.mem_dim)(lh), nn.Linear(self.mem_dim, self.mem_dim)(rh) } end local i = nn.Sigmoid()(new_gate()) -- input gate local lf = nn.Sigmoid()(new_gate()) -- left forget gate local rf = nn.Sigmoid()(new_gate()) -- right forget gate local update = nn.Tanh()(new_gate()) -- memory cell update vector local c = nn.CAddTable(){ -- memory cell nn.CMulTable(){i, update}, nn.CMulTable(){lf, lc}, nn.CMulTable(){rf, rc} } local h if self.gate_output then local o = nn.Sigmoid()(new_gate()) -- output gate h = nn.CMulTable(){o, nn.Tanh()(c)} else h = nn.Tanh()(c) end local composer = nn.gModule( {lc, lh, rc, rh}, {c, h}) """ def __init__(self, cuda, in_dim, mem_dim): super(BinaryTreeComposer, self).__init__() self.cudaFlag = cuda self.in_dim = in_dim self.mem_dim = mem_dim def new_gate(): lh = nn.Linear(self.mem_dim, self.mem_dim) rh = nn.Linear(self.mem_dim, self.mem_dim) return lh, rh self.ilh, self.irh = new_gate() self.lflh, self.lfrh = new_gate() self.rflh, self.rfrh = new_gate() self.ulh, self.urh = new_gate() if self.cudaFlag: self.ilh = self.ilh self.irh = self.irh self.lflh = self.lflh self.lfrh = self.lfrh self.rflh = self.rflh self.rfrh = self.rfrh self.ulh = self.ulh def forward(self, lc, lh, rc, rh): i = F.sigmoid(self.ilh(lh) + self.irh(rh)) lf = F.sigmoid(self.lflh(lh) + self.lfrh(rh)) rf = F.sigmoid(self.rflh(lh) + self.rfrh(rh)) update = F.tanh(self.ulh(lh) + self.urh(rh)) c = i * update + lf * lc + rf * rc h = F.tanh(c) return c, h def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cuda': False, 'in_dim': 4, 'mem_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_out_ptr1 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, xmask) tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr6 + x2, xmask) tmp18 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + x2, xmask) tmp21 = tl.load(in_ptr9 + x0, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr10 + x2, xmask) tmp28 = tl.load(in_ptr11 + x2, xmask) tmp29 = tl.load(in_ptr12 + x0, xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr13 + x2, xmask) tmp32 = tl.load(in_ptr14 + x0, xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr15 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = libdevice.tanh(tmp14) tmp16 = tmp7 * tmp15 tmp19 = tmp17 + tmp18 tmp22 = tmp20 + tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.sigmoid(tmp23) tmp26 = tmp24 * tmp25 tmp27 = tmp16 + tmp26 tmp30 = tmp28 + tmp29 tmp33 = tmp31 + tmp32 tmp34 = tmp30 + tmp33 tmp35 = tl.sigmoid(tmp34) tmp37 = tmp35 * tmp36 tmp38 = tmp27 + tmp37 tmp39 = 1.0 tmp40 = tmp39 - tmp35 tmp41 = tmp35 * tmp40 tmp42 = tmp39 - tmp24 tmp43 = tmp24 * tmp42 tmp44 = libdevice.tanh(tmp38) tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(in_out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr0 + x2, tmp38, xmask) tl.store(out_ptr1 + x2, tmp41, xmask) tl.store(out_ptr2 + x2, tmp43, xmask) tl.store(out_ptr3 + x2, tmp44, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) 
assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_20, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf4) del primals_9 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf5) del primals_11 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf6) del primals_13 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf7) del primals_15 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf8) del primals_17 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(256)]( buf2, buf9, primals_2, buf1, primals_5, primals_16, buf8, primals_18, buf3, primals_8, buf4, primals_10, primals_19, buf5, primals_12, buf6, primals_14, primals_20, buf10, buf12, buf13, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 del buf4 del buf5 del buf6 del buf8 del primals_10 del primals_12 del primals_14 del primals_16 del primals_18 del primals_2 del primals_5 del primals_8 return buf10, buf11, primals_19, primals_20, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), buf2, buf9, buf11, buf12, buf13 class BinaryTreeComposerNew(nn.Module): """ local lc, lh = nn.Identity()(), nn.Identity()() local rc, rh = nn.Identity()(), nn.Identity()() local new_gate = function() return nn.CAddTable(){ nn.Linear(self.mem_dim, self.mem_dim)(lh), nn.Linear(self.mem_dim, self.mem_dim)(rh) } end local i = nn.Sigmoid()(new_gate()) -- input gate 
local lf = nn.Sigmoid()(new_gate()) -- left forget gate local rf = nn.Sigmoid()(new_gate()) -- right forget gate local update = nn.Tanh()(new_gate()) -- memory cell update vector local c = nn.CAddTable(){ -- memory cell nn.CMulTable(){i, update}, nn.CMulTable(){lf, lc}, nn.CMulTable(){rf, rc} } local h if self.gate_output then local o = nn.Sigmoid()(new_gate()) -- output gate h = nn.CMulTable(){o, nn.Tanh()(c)} else h = nn.Tanh()(c) end local composer = nn.gModule( {lc, lh, rc, rh}, {c, h}) """ def __init__(self, cuda, in_dim, mem_dim): super(BinaryTreeComposerNew, self).__init__() self.cudaFlag = cuda self.in_dim = in_dim self.mem_dim = mem_dim def new_gate(): lh = nn.Linear(self.mem_dim, self.mem_dim) rh = nn.Linear(self.mem_dim, self.mem_dim) return lh, rh self.ilh, self.irh = new_gate() self.lflh, self.lfrh = new_gate() self.rflh, self.rfrh = new_gate() self.ulh, self.urh = new_gate() if self.cudaFlag: self.ilh = self.ilh self.irh = self.irh self.lflh = self.lflh self.lfrh = self.lfrh self.rflh = self.rflh self.rfrh = self.rfrh self.ulh = self.ulh def forward(self, input_0, input_1, input_2, input_3): primals_1 = self.ilh.weight primals_2 = self.ilh.bias primals_4 = self.irh.weight primals_5 = self.irh.bias primals_7 = self.lflh.weight primals_8 = self.lflh.bias primals_9 = self.lfrh.weight primals_10 = self.lfrh.bias primals_11 = self.rflh.weight primals_12 = self.rflh.bias primals_13 = self.rfrh.weight primals_14 = self.rfrh.bias primals_15 = self.ulh.weight primals_16 = self.ulh.bias primals_17 = self.urh.weight primals_18 = self.urh.bias primals_3 = input_0 primals_6 = input_1 primals_19 = input_2 primals_20 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0], output[1]
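The composer is the binary TreeLSTM cell sketched in the Lua snippet of the docstring: c = i * u + lf * lc + rf * rc and, with the output gate absent, h = tanh(c); the fused kernel emits both in one pass (buf10 holds c, buf11 its tanh). A short eager illustration with the get_inputs() shapes; the snippet is a sketch, not repository code.

import torch

comp = BinaryTreeComposer(cuda=False, in_dim=4, mem_dim=4)
lc, lh, rc, rh = (torch.rand(4, 4, 4, 4) for _ in range(4))
c, h = comp(lc, lh, rc, rh)
assert torch.allclose(h, torch.tanh(c))            # h is just tanh of the new memory cell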
supunab/Lantern
BinaryTreeComposer
false
16520
[ "BSD-3-Clause" ]
158
932a031816617d71c46653f3b2245129a6a8a7c8
https://github.com/supunab/Lantern/tree/932a031816617d71c46653f3b2245129a6a8a7c8
reduction_1x1
import math import torch import torch.nn as nn import torch.utils.data.distributed class reduction_1x1(nn.Sequential): def __init__(self, num_in_filters, num_out_filters, max_depth, is_final =False): super(reduction_1x1, self).__init__() self.max_depth = max_depth self.is_final = is_final self.sigmoid = nn.Sigmoid() self.reduc = torch.nn.Sequential() while num_out_filters >= 4: if num_out_filters < 8: if self.is_final: self.reduc.add_module('final', torch.nn.Sequential(nn. Conv2d(num_in_filters, out_channels=1, bias=False, kernel_size=1, stride=1, padding=0), nn.Sigmoid())) else: self.reduc.add_module('plane_params', torch.nn.Conv2d( num_in_filters, out_channels=3, bias=False, kernel_size=1, stride=1, padding=0)) break else: self.reduc.add_module('inter_{}_{}'.format(num_in_filters, num_out_filters), torch.nn.Sequential(nn.Conv2d( in_channels=num_in_filters, out_channels= num_out_filters, bias=False, kernel_size=1, stride=1, padding=0), nn.ELU())) num_in_filters = num_out_filters num_out_filters = num_out_filters // 2 def forward(self, net): net = self.reduc.forward(net) if not self.is_final: theta = self.sigmoid(net[:, 0, :, :]) * math.pi / 3 phi = self.sigmoid(net[:, 1, :, :]) * math.pi * 2 dist = self.sigmoid(net[:, 2, :, :]) * self.max_depth n1 = torch.mul(torch.sin(theta), torch.cos(phi)).unsqueeze(1) n2 = torch.mul(torch.sin(theta), torch.sin(phi)).unsqueeze(1) n3 = torch.cos(theta).unsqueeze(1) n4 = dist.unsqueeze(1) net = torch.cat([n1, n2, n3, n4], dim=1) return net def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in_filters': 4, 'num_out_filters': 4, 'max_depth': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 48 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = 3.141592653589793 tmp8 = tmp6 * tmp7 tmp9 = 0.3333333333333333 tmp10 = tmp8 * tmp9 tmp11 = tl_math.sin(tmp10) tmp12 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tmp13 * tmp7 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tl_math.cos(tmp16) tmp18 = tmp11 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tmp22 = tl.full([1], 2, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (x0 + 48 * x2), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.sigmoid(tmp25) tmp27 = tmp26 * tmp7 tmp28 = tmp27 * tmp9 tmp29 = tl_math.sin(tmp28) tmp30 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.sigmoid(tmp30) tmp32 = tmp31 * tmp7 tmp33 = tmp32 * tmp15 tmp34 = tl_math.sin(tmp33) tmp35 = tmp29 * tmp34 tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp24, tmp35, tmp36) tmp38 = tmp0 >= tmp22 tmp39 = tl.full([1], 3, tl.int64) tmp40 = tmp0 < tmp39 tmp41 = tmp38 & tmp40 tmp42 = tl.load(in_ptr0 + (x0 + 48 * x2), tmp41 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tl.sigmoid(tmp42) tmp44 = tmp43 * tmp7 tmp45 = tmp44 * tmp9 tmp46 = tl_math.cos(tmp45) tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp41, tmp46, tmp47) tmp49 = tmp0 >= tmp39 tl.full([1], 4, tl.int64) tmp52 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), tmp49 & xmask, eviction_policy='evict_last', other=0.0) tmp53 = tl.sigmoid(tmp52) tmp54 = 1.0 tmp55 = tmp53 * tmp54 tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype) tmp57 = tl.where(tmp49, tmp55, tmp56) tmp58 = tl.where(tmp41, tmp48, tmp57) tmp59 = tl.where(tmp24, tmp37, tmp58) tmp60 = tl.where(tmp4, tmp20, tmp59) tl.store(out_ptr0 + x3, tmp60, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 4, 4), (48, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0 class reduction_1x1New(nn.Sequential): def 
__init__(self, num_in_filters, num_out_filters, max_depth, is_final =False): super(reduction_1x1New, self).__init__() self.max_depth = max_depth self.is_final = is_final self.sigmoid = nn.Sigmoid() self.reduc = torch.nn.Sequential() while num_out_filters >= 4: if num_out_filters < 8: if self.is_final: self.reduc.add_module('final', torch.nn.Sequential(nn. Conv2d(num_in_filters, out_channels=1, bias=False, kernel_size=1, stride=1, padding=0), nn.Sigmoid())) else: self.reduc.add_module('plane_params', torch.nn.Conv2d( num_in_filters, out_channels=3, bias=False, kernel_size=1, stride=1, padding=0)) break else: self.reduc.add_module('inter_{}_{}'.format(num_in_filters, num_out_filters), torch.nn.Sequential(nn.Conv2d( in_channels=num_in_filters, out_channels= num_out_filters, bias=False, kernel_size=1, stride=1, padding=0), nn.ELU())) num_in_filters = num_out_filters num_out_filters = num_out_filters // 2 def forward(self, input_0): primals_1 = self.reduc.plane_params.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
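For the non-final head, the three convolution channels are squashed into two angles and a distance, and (sin(theta)cos(phi), sin(theta)sin(phi), cos(theta)) is always a unit normal, so the first three output channels have unit L2 norm. A short eager check using the get_init_inputs()/get_inputs() values; the tolerance is an assumption.

import torch

head = reduction_1x1(num_in_filters=4, num_out_filters=4, max_depth=1)
out = head(torch.rand(4, 4, 4, 4))                 # channels: n1, n2, n3, dist
norm = out[:, :3].pow(2).sum(dim=1)
assert torch.allclose(norm, torch.ones_like(norm), atol=1e-6)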
syKevinPeng/TransDepth
reduction_1x1
false
16521
[ "MIT" ]
118
2282039da7bc0812e19a27b2d73a25bdef97d739
https://github.com/syKevinPeng/TransDepth/tree/2282039da7bc0812e19a27b2d73a25bdef97d739
SelfAttention
import torch import torch.nn as nn from scipy.sparse import * class SelfAttention(nn.Module): def __init__(self, input_size, hidden_size): super(SelfAttention, self).__init__() self.W1 = torch.Tensor(input_size, hidden_size) self.W1 = nn.Parameter(nn.init.xavier_uniform_(self.W1)) self.W2 = torch.Tensor(hidden_size, 1) self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2)) def forward(self, x, attention_mask=None): attention = torch.mm(torch.tanh(torch.mm(x.view(-1, x.size(-1)), self.W1)), self.W2).view(x.size(0), -1) if attention_mask is not None: attention = attention.masked_fill_(1 - attention_mask.byte(), -INF) probs = torch.softmax(attention, dim=-1).unsqueeze(1) weighted_x = torch.bmm(probs, x).squeeze(1) return weighted_x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0 ), primals_1, out=buf5) del buf4 return reinterpret_tensor(buf5, (4, 
4), (4, 1), 0 ), primals_1, buf1, buf2, reinterpret_tensor(primals_3, (1, 4), (1, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, input_size, hidden_size): super(SelfAttentionNew, self).__init__() self.W1 = torch.Tensor(input_size, hidden_size) self.W1 = nn.Parameter(nn.init.xavier_uniform_(self.W1)) self.W2 = torch.Tensor(hidden_size, 1) self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2)) def forward(self, input_0): primals_2 = self.W1 primals_3 = self.W2 primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
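The compiled forward takes only the input tensor, so the attention_mask branch of the eager module (which refers to an INF constant that does not appear to be defined in that file) is never traced. A minimal usage sketch with the get_inputs() shape; the CUDA guard is an assumption.

import torch

if torch.cuda.is_available():
    attn = SelfAttentionNew(input_size=4, hidden_size=4).cuda()
    pooled = attn(torch.rand(4, 4, 4, device='cuda'))
    print(pooled.shape)                            # one attention-weighted vector per sequence: (4, 4)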
talha1503/RL-based-Graph2Seq-for-NQG
SelfAttention
false
16522
[ "Apache-2.0" ]
100
1039e0b6231ae7029ea6e4073b1e55df5ad2e928
https://github.com/talha1503/RL-based-Graph2Seq-for-NQG/tree/1039e0b6231ae7029ea6e4073b1e55df5ad2e928
SEBlock
import torch import torch.nn as nn import torch.nn.functional as F class SEBlock(nn.Module): def __init__(self, input_channels, internal_neurons): super(SEBlock, self).__init__() self.down = nn.Conv2d(in_channels=input_channels, out_channels= internal_neurons, kernel_size=1, stride=1, bias=True) self.up = nn.Conv2d(in_channels=internal_neurons, out_channels= input_channels, kernel_size=1, stride=1, bias=True) def forward(self, inputs): x = F.avg_pool2d(inputs, kernel_size=inputs.size(3)) x = self.down(x) x = F.relu(x) x = self.up(x) x = F.sigmoid(x) x = x.repeat(1, 1, inputs.size(2), inputs.size(3)) return inputs * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'internal_neurons': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + 
x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_sigmoid_2[grid(16)](buf4, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_repeat_3[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4 class SEBlockNew(nn.Module): def __init__(self, input_channels, internal_neurons): super(SEBlockNew, self).__init__() self.down = nn.Conv2d(in_channels=input_channels, out_channels= internal_neurons, kernel_size=1, stride=1, bias=True) self.up = nn.Conv2d(in_channels=internal_neurons, out_channels= input_channels, kernel_size=1, stride=1, bias=True) def forward(self, input_0): primals_2 = self.down.weight primals_3 = self.down.bias primals_4 = self.up.weight primals_5 = self.up.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
sysu-shey/ACNet
SEBlock
false
16523
[ "MIT" ]
767
6d967d3fff2d79a37f85799b78a21ffbd9001bd2
https://github.com/sysu-shey/ACNet/tree/6d967d3fff2d79a37f85799b78a21ffbd9001bd2
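The fused kernels above implement a squeeze-and-excitation pass: a global 4x4 average pool (sum of 16 values scaled by 0.0625), a 1x1 convolution with ReLU, a 1x1 convolution with sigmoid, and a channel-wise rescale of the input. Below is a plain-PyTorch sketch of that computation for reference only; se_reference is a hypothetical helper inferred from the kernels, not code from the repo, and the compiled SEBlockNew path additionally requires a CUDA device.

import torch
import torch.nn.functional as F

def se_reference(x, down, up):
    s = F.adaptive_avg_pool2d(x, 1)      # squeeze: one value per channel
    s = F.relu(down(s))                  # excite: bottleneck 1x1 conv + ReLU
    s = torch.sigmoid(up(s))             # per-channel gate in (0, 1)
    return x * s                         # broadcast rescale over the spatial map

down = torch.nn.Conv2d(4, 4, kernel_size=1)
up = torch.nn.Conv2d(4, 4, kernel_size=1)
x = torch.rand(4, 4, 4, 4)
assert se_reference(x, down, up).shape == x.shape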
FocalLoss
import torch import torch.nn as nn class FocalLoss(nn.Module): def __init__(self, gamma=0, alpha=None, device=None): super(FocalLoss, self).__init__() self.gamma = gamma self.alpha = alpha if self.alpha is not None: self.alpha = torch.FloatTensor([1 - alpha, alpha]) def forward(self, pred, target): batch_size, n_pts = pred.size() pos = torch.sigmoid(pred) neg = 1 - pos pt = torch.stack([neg, pos], dim=-1).view(-1, 2) index = target.view(-1, 1).long() pt = pt.gather(-1, index).view(-1) logpt = pt.log() if self.alpha is not None: at = self.alpha.gather(0, index.view(-1)) logpt = logpt * at loss = -1 * (1 - pt) ** self.gamma * logpt return loss.view(batch_size, n_pts) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_log_mul_pow_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 2, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 2) | ~xmask, 'index out of bounds: 0 <= tmp5 < 2') tmp7 = tmp5 tl.full([1], 0, tl.int64) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp7 < tmp10 tmp12 = tl.load(in_ptr1 + x0, tmp11 & xmask, other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = 1.0 tmp15 = tmp14 - tmp13 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tmp7 >= tmp10 tl.full([1], 2, tl.int64) tmp21 = tl.load(in_ptr1 + x0, tmp18 & xmask, other=0.0) tmp22 = tl.sigmoid(tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp18, tmp22, tmp23) tmp25 = tl.where(tmp11, tmp17, tmp24) tmp14 - tmp25 tmp27 = tl_math.log(tmp25) tmp28 = -1.0 tmp29 = tmp28 * tmp27 tl.store(out_ptr0 + x0, tmp29, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_log_mul_pow_rsub_0[grid(16)](arg1_1, arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class FocalLossNew(nn.Module): def __init__(self, gamma=0, alpha=None, device=None): super(FocalLossNew, self).__init__() self.gamma = gamma self.alpha = alpha if self.alpha is not None: self.alpha = torch.FloatTensor([1 - alpha, alpha]) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
taconite/PTF
FocalLoss
false
16524
[ "MIT" ]
62
a8789c9f752aea2944c2a75e04cc2aa21c7e4a00
https://github.com/taconite/PTF/tree/a8789c9f752aea2944c2a75e04cc2aa21c7e4a00
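A minimal CPU usage sketch for the FocalLoss entry above, assuming the original FocalLoss class from that row is in scope (the compiled FocalLossNew path needs CUDA). With gamma=0 and no alpha the loss reduces to per-element binary cross-entropy with logits, which the assertion checks; the tolerance is an assumption to absorb the slightly different log/sigmoid orderings.

import torch
import torch.nn.functional as F

# FocalLoss is the class defined in the row above
pred = torch.randn(4, 4)                    # logits
target = (torch.rand(4, 4) > 0.5).float()   # binary labels
loss = FocalLoss(gamma=0)(pred, target)     # -(1 - pt)**0 * log(pt) = -log(pt)
ref = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
assert torch.allclose(loss, ref, atol=1e-5)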
ResnetBlockInplaceNormShallowConv1d
import torch import torch.nn as nn class ResnetBlockInplaceNormShallowConv1d(nn.Module): """ Fully connected ResNet Block imeplemented with group convolutions and weight/spectral normalizations. Args: size_in (int): input dimension groups (int): number of groups for group convolutions size_out (int): output dimension size_h (int): hidden dimension """ def __init__(self, size_in, groups, norm_method='weight_norm', size_out =None, size_h=None, dropout_prob=0.0, leaky=False): super().__init__() if size_out is None: size_out = size_in if size_h is None: size_h = min(size_in, size_out) if dropout_prob > 0.0: self.dropout = nn.Dropout(dropout_prob, inplace=True) else: self.dropout = None self.size_in = size_in self.size_h = size_h self.size_out = size_out fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False) if norm_method == 'weight_norm': self.fc_0 = nn.utils.weight_norm(fc_0) elif norm_method == 'spectral_norm': self.fc_0 = nn.utils.spectral_norm(fc_0) else: raise ValueError('Normalization method {} not supported.'. format(norm_method)) if not leaky: self.actvn = nn.ReLU() else: self.actvn = nn.LeakyReLU(0.1) if size_in == size_out: self.shortcut = None else: self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups) def forward(self, x): if self.dropout is not None: dx = self.fc_0(self.dropout(self.actvn(x))) else: dx = self.fc_0(self.actvn(x)) if self.shortcut is not None: x_s = self.shortcut(x) else: x_s = x return x_s + dx def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3, primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_relu_2[grid(16)](primals_1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4 ), (0, 4, 1), 0), buf1, stride=(1,), padding=(0,), dilation=(1, ), transposed=False, output_padding=(0,), groups=1, bias=None) 
assert_size_stride(buf3, (1, 4, 4), (16, 4, 1)) buf4 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0) del buf3 triton_poi_fused_add_3[grid(16)](buf4, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 return buf4, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor( buf2, (1, 4, 4), (16, 4, 1), 0) class ResnetBlockInplaceNormShallowConv1dNew(nn.Module): """ Fully connected ResNet Block imeplemented with group convolutions and weight/spectral normalizations. Args: size_in (int): input dimension groups (int): number of groups for group convolutions size_out (int): output dimension size_h (int): hidden dimension """ def __init__(self, size_in, groups, norm_method='weight_norm', size_out =None, size_h=None, dropout_prob=0.0, leaky=False): super().__init__() if size_out is None: size_out = size_in if size_h is None: size_h = min(size_in, size_out) if dropout_prob > 0.0: self.dropout = nn.Dropout(dropout_prob, inplace=True) else: self.dropout = None self.size_in = size_in self.size_h = size_h self.size_out = size_out fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False) if norm_method == 'weight_norm': self.fc_0 = nn.utils.weight_norm(fc_0) elif norm_method == 'spectral_norm': self.fc_0 = nn.utils.spectral_norm(fc_0) else: raise ValueError('Normalization method {} not supported.'. format(norm_method)) if not leaky: self.actvn = nn.ReLU() else: self.actvn = nn.LeakyReLU(0.1) if size_in == size_out: self.shortcut = None else: self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups) def forward(self, input_0): primals_2 = self.fc_0.weight_g primals_3 = self.fc_0.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
taconite/MetaAvatar-release
ResnetBlockInplaceNormShallowConv1d
false
16525
[ "MIT" ]
60
c9403a478ee82232633d25f65f108befd21d04e9
https://github.com/taconite/MetaAvatar-release/tree/c9403a478ee82232633d25f65f108befd21d04e9
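A small CPU shape check for the block above, assuming the original ResnetBlockInplaceNormShallowConv1d class is in scope; the unbatched (channels, length) input mirrors get_inputs() and relies on Conv1d accepting unbatched inputs (PyTorch 1.12+), which is an assumption here.

import torch

block = ResnetBlockInplaceNormShallowConv1d(size_in=4, groups=1)
x = torch.rand(4, 4)             # (channels, length), unbatched as in get_inputs()
out = block(x)                   # x + weight-normalized 1x1 conv of ReLU(x)
assert out.shape == x.shape      # size_in == size_out, so the shortcut is the identity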
ResnetBlockGroupNormConv1d
import torch import torch.nn as nn class GroupNorm1d(nn.Module): """ Group normalization that does per-point group normalization. Args: groups (int): number of groups f_dim (int): feature dimension, mush be divisible by groups """ def __init__(self, groups, f_dim, eps=1e-05, affine=True): super().__init__() self.groups = groups self.f_dim = f_dim self.affine = affine self.eps = eps assert f_dim % groups == 0 if affine: self.gamma = nn.Parameter(torch.ones(1, f_dim, 1)) self.beta = nn.Parameter(torch.zeros(1, f_dim, 1)) def forward(self, x): batch_size, D, T = x.size() net = x.view(batch_size, self.groups, D // self.groups, T) means = net.mean(2, keepdim=True) variances = net.var(2, keepdim=True) net = (net - means) / (variances + self.eps).sqrt() net = net.view(batch_size, D, T) if self.affine: return net * self.gamma + self.beta else: return net class ResnetBlockGroupNormConv1d(nn.Module): """ Fully connected ResNet Block imeplemented with group convolutions and group normalizations. Args: size_in (int): input dimension groups (int): number of groups for group convolutions gn_groups (int): number of groups for group normalizations size_out (int): output dimension size_h (int): hidden dimension """ def __init__(self, size_in, groups, gn_groups=4, size_out=None, size_h= None, dropout_prob=0.0, leaky=False): super().__init__() if size_out is None: size_out = size_in if size_h is None: size_h = min(size_in, size_out) if dropout_prob > 0.0: self.dropout = nn.Dropout(dropout_prob, inplace=True) else: self.dropout = None self.size_in = size_in self.size_h = size_h self.size_out = size_out self.gn_0 = GroupNorm1d(groups * gn_groups, size_in) self.gn_1 = GroupNorm1d(groups * gn_groups, size_h) self.fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False) self.fc_1 = nn.Conv1d(size_h, size_out, 1, groups=groups, bias=False) if not leaky: self.actvn = nn.ReLU() else: self.actvn = nn.LeakyReLU(0.1) if size_in == size_out: self.shortcut = None else: self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups) nn.init.zeros_(self.fc_1.weight) def forward(self, x): if self.dropout is not None: net = self.fc_0(self.dropout(self.actvn(self.gn_0(x)))) dx = self.fc_1(self.dropout(self.actvn(self.gn_1(net)))) else: net = self.fc_0(self.actvn(self.gn_0(x))) dx = self.fc_1(self.actvn(self.gn_1(net))) if self.shortcut is not None: x_s = self.shortcut(x) else: x_s = x return x_s + dx def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp11 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp3 = tmp0 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = 0.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp3 / tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tl.full([1], 0, tl.int32) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_0[grid(64)](primals_1, primals_2, primals_3, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf1 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_relu_0[grid(64)](buf1, primals_5, primals_6, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_1[grid(64)](buf4, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf4, primals_1, primals_4, primals_5, primals_7, buf0, buf1, buf2 class GroupNorm1d(nn.Module): """ Group normalization that does per-point group normalization. 
Args: groups (int): number of groups f_dim (int): feature dimension, mush be divisible by groups """ def __init__(self, groups, f_dim, eps=1e-05, affine=True): super().__init__() self.groups = groups self.f_dim = f_dim self.affine = affine self.eps = eps assert f_dim % groups == 0 if affine: self.gamma = nn.Parameter(torch.ones(1, f_dim, 1)) self.beta = nn.Parameter(torch.zeros(1, f_dim, 1)) def forward(self, x): batch_size, D, T = x.size() net = x.view(batch_size, self.groups, D // self.groups, T) means = net.mean(2, keepdim=True) variances = net.var(2, keepdim=True) net = (net - means) / (variances + self.eps).sqrt() net = net.view(batch_size, D, T) if self.affine: return net * self.gamma + self.beta else: return net class ResnetBlockGroupNormConv1dNew(nn.Module): """ Fully connected ResNet Block imeplemented with group convolutions and group normalizations. Args: size_in (int): input dimension groups (int): number of groups for group convolutions gn_groups (int): number of groups for group normalizations size_out (int): output dimension size_h (int): hidden dimension """ def __init__(self, size_in, groups, gn_groups=4, size_out=None, size_h= None, dropout_prob=0.0, leaky=False): super().__init__() if size_out is None: size_out = size_in if size_h is None: size_h = min(size_in, size_out) if dropout_prob > 0.0: self.dropout = nn.Dropout(dropout_prob, inplace=True) else: self.dropout = None self.size_in = size_in self.size_h = size_h self.size_out = size_out self.gn_0 = GroupNorm1d(groups * gn_groups, size_in) self.gn_1 = GroupNorm1d(groups * gn_groups, size_h) self.fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False) self.fc_1 = nn.Conv1d(size_h, size_out, 1, groups=groups, bias=False) if not leaky: self.actvn = nn.ReLU() else: self.actvn = nn.LeakyReLU(0.1) if size_in == size_out: self.shortcut = None else: self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups) nn.init.zeros_(self.fc_1.weight) def forward(self, input_0): primals_2 = self.gn_0.gamma primals_3 = self.gn_0.beta primals_5 = self.gn_1.gamma primals_6 = self.gn_1.beta primals_4 = self.fc_0.weight primals_7 = self.fc_1.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
taconite/MetaAvatar-release
ResnetBlockGroupNormConv1d
false
16526
[ "MIT" ]
60
c9403a478ee82232633d25f65f108befd21d04e9
https://github.com/taconite/MetaAvatar-release/tree/c9403a478ee82232633d25f65f108befd21d04e9
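A quick sanity check of the GroupNorm1d helper defined alongside the block above, assuming the class is in scope. It uses two channels per group; note that the default get_init_inputs() configuration (gn_groups equal to the channel count) leaves a single channel per group, whose unbiased variance is undefined, which is why the generated kernel above literally divides by 0.0.

import torch

gn = GroupNorm1d(groups=2, f_dim=4, affine=False)
x = torch.rand(3, 4, 5)                    # (batch, channels, points)
y = gn(x).view(3, 2, 2, 5)                 # regroup to (batch, groups, ch_per_group, points)
assert y.mean(dim=2).abs().max() < 1e-5    # each group is zero-mean at every point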
GatedFusion
import torch import torch.nn as nn from scipy.sparse import * class GatedFusion(nn.Module): def __init__(self, hidden_size): super(GatedFusion, self).__init__() """GatedFusion module""" self.fc_z = nn.Linear(4 * hidden_size, hidden_size, bias=True) def forward(self, h_state, input): z = torch.sigmoid(self.fc_z(torch.cat([h_state, input, h_state * input, h_state - input], -1))) h_state = (1 - z) * h_state + z * input return h_state def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = tmp1 * tmp6 tmp8 = tmp5 + tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1024)](primals_1, primals_2, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_2, reinterpret_tensor(buf0, (64, 16), ( 16, 1), 0), buf1 class GatedFusionNew(nn.Module): def __init__(self, hidden_size): super(GatedFusionNew, self).__init__() """GatedFusion module""" self.fc_z = nn.Linear(4 * hidden_size, hidden_size, bias=True) def forward(self, input_0, input_1): primals_3 = self.fc_z.weight primals_4 = self.fc_z.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
talha1503/RL-based-Graph2Seq-for-NQG
GatedFusion
false
16527
[ "Apache-2.0" ]
100
1039e0b6231ae7029ea6e4073b1e55df5ad2e928
https://github.com/talha1503/RL-based-Graph2Seq-for-NQG/tree/1039e0b6231ae7029ea6e4073b1e55df5ad2e928
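A minimal CPU usage sketch for the GatedFusion entry above, assuming the original class is in scope. The sigmoid gate z mixes the hidden state and the input element-wise, so every output value lies between the two inputs.

import torch

fuse = GatedFusion(hidden_size=4)
h_state = torch.rand(2, 5, 4)
inp = torch.rand(2, 5, 4)
out = fuse(h_state, inp)       # (1 - z) * h_state + z * inp, z = sigmoid(W [h; x; h*x; h-x])
assert out.shape == (2, 5, 4)
assert (out <= torch.maximum(h_state, inp) + 1e-6).all()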
ResnetBlockGroupNormShallowConv1d
import torch import torch.nn as nn class GroupNorm1d(nn.Module): """ Group normalization that does per-point group normalization. Args: groups (int): number of groups f_dim (int): feature dimension, mush be divisible by groups """ def __init__(self, groups, f_dim, eps=1e-05, affine=True): super().__init__() self.groups = groups self.f_dim = f_dim self.affine = affine self.eps = eps assert f_dim % groups == 0 if affine: self.gamma = nn.Parameter(torch.ones(1, f_dim, 1)) self.beta = nn.Parameter(torch.zeros(1, f_dim, 1)) def forward(self, x): batch_size, D, T = x.size() net = x.view(batch_size, self.groups, D // self.groups, T) means = net.mean(2, keepdim=True) variances = net.var(2, keepdim=True) net = (net - means) / (variances + self.eps).sqrt() net = net.view(batch_size, D, T) if self.affine: return net * self.gamma + self.beta else: return net class ResnetBlockGroupNormShallowConv1d(nn.Module): """ Fully connected ResNet Block imeplemented with group convolutions and group normalizations. Args: size_in (int): input dimension groups (int): number of groups for group convolutions gn_groups (int): number of groups for group normalizations size_out (int): output dimension size_h (int): hidden dimension """ def __init__(self, size_in, groups, gn_groups=4, size_out=None, size_h= None, dropout_prob=0.0, leaky=False): super().__init__() if size_out is None: size_out = size_in if size_h is None: size_h = min(size_in, size_out) if dropout_prob > 0.0: self.dropout = nn.Dropout(dropout_prob, inplace=True) else: self.dropout = None self.size_in = size_in self.size_h = size_h self.size_out = size_out self.gn_0 = GroupNorm1d(groups * gn_groups, size_in) self.fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False) if not leaky: self.actvn = nn.ReLU() else: self.actvn = nn.LeakyReLU(0.1) if size_in == size_out: self.shortcut = None else: self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups) def forward(self, x): if self.dropout is not None: dx = self.fc_0(self.dropout(self.actvn(self.gn_0(x)))) else: dx = self.fc_0(self.actvn(self.gn_0(x))) if self.shortcut is not None: x_s = self.shortcut(x) else: x_s = x return x_s + dx def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp11 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp3 = tmp0 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = 0.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp3 / tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tl.full([1], 0, tl.int32) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_0[grid(64)](primals_1, primals_2, primals_3, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf1 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_add_1[grid(64)](buf2, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, primals_1, primals_4, buf0 class GroupNorm1d(nn.Module): """ Group normalization that does per-point group normalization. Args: groups (int): number of groups f_dim (int): feature dimension, mush be divisible by groups """ def __init__(self, groups, f_dim, eps=1e-05, affine=True): super().__init__() self.groups = groups self.f_dim = f_dim self.affine = affine self.eps = eps assert f_dim % groups == 0 if affine: self.gamma = nn.Parameter(torch.ones(1, f_dim, 1)) self.beta = nn.Parameter(torch.zeros(1, f_dim, 1)) def forward(self, x): batch_size, D, T = x.size() net = x.view(batch_size, self.groups, D // self.groups, T) means = net.mean(2, keepdim=True) variances = net.var(2, keepdim=True) net = (net - means) / (variances + self.eps).sqrt() net = net.view(batch_size, D, T) if self.affine: return net * self.gamma + self.beta else: return net class ResnetBlockGroupNormShallowConv1dNew(nn.Module): """ Fully connected ResNet Block imeplemented with group convolutions and group normalizations. 
Args: size_in (int): input dimension groups (int): number of groups for group convolutions gn_groups (int): number of groups for group normalizations size_out (int): output dimension size_h (int): hidden dimension """ def __init__(self, size_in, groups, gn_groups=4, size_out=None, size_h= None, dropout_prob=0.0, leaky=False): super().__init__() if size_out is None: size_out = size_in if size_h is None: size_h = min(size_in, size_out) if dropout_prob > 0.0: self.dropout = nn.Dropout(dropout_prob, inplace=True) else: self.dropout = None self.size_in = size_in self.size_h = size_h self.size_out = size_out self.gn_0 = GroupNorm1d(groups * gn_groups, size_in) self.fc_0 = nn.Conv1d(size_in, size_h, 1, groups=groups, bias=False) if not leaky: self.actvn = nn.ReLU() else: self.actvn = nn.LeakyReLU(0.1) if size_in == size_out: self.shortcut = None else: self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False, groups=groups) def forward(self, input_0): primals_2 = self.gn_0.gamma primals_3 = self.gn_0.beta primals_4 = self.fc_0.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
taconite/MetaAvatar-release
ResnetBlockGroupNormShallowConv1d
false
16528
[ "MIT" ]
60
c9403a478ee82232633d25f65f108befd21d04e9
https://github.com/taconite/MetaAvatar-release/tree/c9403a478ee82232633d25f65f108befd21d04e9
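A shape check for the block above in the non-identity case, assuming the original ResnetBlockGroupNormShallowConv1d class is in scope; the channel sizes are illustrative. When size_in differs from size_out, the 1x1 grouped convolution shortcut performs the channel projection.

import torch

block = ResnetBlockGroupNormShallowConv1d(size_in=8, groups=1, gn_groups=2, size_out=6)
x = torch.rand(2, 8, 10)           # (batch, channels, points)
out = block(x)
assert out.shape == (2, 6, 10)     # shortcut Conv1d maps 8 -> 6 channels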
PatchEmbed
import torch import torch.nn as nn from torch import optim as optim class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() num_patches = img_size // patch_size * (img_size // patch_size) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): _B, _C, _H, _W = x.shape return self.proj(x) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 2304 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 768 y1 = yindex // 768 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 12288 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1)) assert_size_stride(primals_3, (768,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch. float32) triton_poi_fused_1[grid(2304, 256)](primals_2, buf1, 2304, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768)) buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch. 
float32) triton_poi_fused_convolution_2[grid(3072, 16)](buf2, primals_3, buf3, 3072, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del buf2 del primals_3 return buf3, buf0, buf1 class PatchEmbedNew(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() num_patches = img_size // patch_size * (img_size // patch_size) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
taokong/ibot
PatchEmbed
false
16529
[ "Apache-2.0" ]
327
a2ee1ae7495d4ea8fb9ba100434c062f1bd3d1f0
https://github.com/taokong/ibot/tree/a2ee1ae7495d4ea8fb9ba100434c062f1bd3d1f0
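A minimal CPU usage sketch for the PatchEmbed entry above, assuming the original class is in scope; the strided convolution turns each non-overlapping 16x16 patch into one 768-dimensional vector, and this variant returns the feature map without flattening it into a token sequence.

import torch

embed = PatchEmbed(img_size=64, patch_size=16, in_chans=3, embed_dim=768)
x = torch.rand(2, 3, 64, 64)
out = embed(x)
assert out.shape == (2, 768, 4, 4)   # one embedding per 16x16 patch
assert embed.num_patches == 16       # (64 // 16) ** 2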
silog_loss
import torch import torch.nn as nn import torch.utils.data.distributed class silog_loss(nn.Module): def __init__(self, variance_focus): super(silog_loss, self).__init__() self.variance_focus = variance_focus def forward(self, depth_est, depth_gt, mask): d = torch.log(depth_est[mask]) - torch.log(depth_gt[mask]) return torch.sqrt((d ** 2).mean() - self.variance_focus * d.mean() ** 2 ) * 10.0 def get_inputs(): return [torch.ones([4], dtype=torch.int64), torch.ones([4], dtype=torch .int64), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'variance_focus': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_index_log_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + tmp4, None, eviction_policy='evict_last') tmp7 = tmp6.to(tl.float32) tmp8 = tl_math.log(tmp7) tmp9 = tl.load(in_ptr2 + tmp4, None, eviction_policy='evict_last') tmp10 = tmp9.to(tl.float32) tmp11 = tl_math.log(tmp10) tmp12 = tmp8 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = 4.0 tmp21 = tmp16 / tmp20 tmp22 = tmp19 / tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp23 * tmp20 tmp25 = tmp21 - tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 10.0 tmp28 = tmp26 * tmp27 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp28, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4,), (1,)) assert_size_stride(arg1_1, (4,), (1,)) assert_size_stride(arg2_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_index_log_mean_mul_pow_sqrt_sub_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class silog_lossNew(nn.Module): def __init__(self, variance_focus): super(silog_lossNew, self).__init__() self.variance_focus = variance_focus def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
syKevinPeng/TransDepth
silog_loss
false
16530
[ "MIT" ]
118
2282039da7bc0812e19a27b2d73a25bdef97d739
https://github.com/syKevinPeng/TransDepth/tree/2282039da7bc0812e19a27b2d73a25bdef97d739
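A minimal CPU usage sketch for the silog_loss entry above (the scale-invariant log loss used in depth estimation), assuming the original class is in scope; variance_focus=0.85 is a typical value and an assumption here, and the boolean mask selects the valid pixels. Since mean(d**2) >= mean(d)**2 and variance_focus < 1, the term under the square root is non-negative.

import torch

criterion = silog_loss(variance_focus=0.85)
depth_est = torch.rand(2, 1, 8, 8) * 10 + 0.1   # strictly positive predictions
depth_gt = torch.rand(2, 1, 8, 8) * 10 + 0.1    # strictly positive ground truth
mask = depth_gt > 0.5                           # boolean mask of valid pixels
loss = criterion(depth_est, depth_gt, mask)     # 10 * sqrt(E[d^2] - 0.85 * E[d]^2), d = log(est/gt)
assert loss.ndim == 0
assert torch.isfinite(loss)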
SoftDiceLoss
import torch import torch.nn as nn class SoftDiceLoss(nn.Module): def __init__(self): super(SoftDiceLoss, self).__init__() def forward(self, output, label): probs = output.view(-1) mask = label.view(-1) smooth = 1 intersection = torch.sum(probs * mask) den1 = torch.sum(probs) den2 = torch.sum(mask) soft_dice = (2 * intersection + smooth) / (den1 + den2 + smooth) return -soft_dice def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = tmp15 / tmp17 tmp19 = -tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_neg_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class SoftDiceLossNew(nn.Module): def __init__(self): super(SoftDiceLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
tdml13/NiftyNet
SoftDiceLoss
false
16531
[ "Apache-2.0" ]
1403
b35fa19ca307e81d229e2fe8269a417724833da2
https://github.com/tdml13/NiftyNet/tree/b35fa19ca307e81d229e2fe8269a417724833da2
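A minimal check for the SoftDiceLoss entry above, assuming the original class is in scope: with perfectly overlapping unit masks the soft Dice coefficient equals 1, so the returned (negated) value is -1.

import torch

dice = SoftDiceLoss()
probs = torch.ones(2, 1, 4, 4)
label = torch.ones(2, 1, 4, 4)
loss = dice(probs, label)      # -(2 * 32 + 1) / (32 + 32 + 1) = -1
assert torch.isclose(loss, torch.tensor(-1.0))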
PatchMerging
import torch import torch.nn as nn import torch.nn.functional as F from math import sqrt from torch import optim as optim class PatchMerging(nn.Module): """Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ Forward function. Args: x: Input feature, tensor size (B, H*W, C). H, W: Spatial resolution of the input feature. """ B, L, C = x.shape H = int(sqrt(L)) W = H x = x.view(B, H, W, C) pad_input = H % 2 == 1 or W % 2 == 1 if pad_input: x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) x0 = x[:, 0::2, 0::2, :] x1 = x[:, 1::2, 0::2, :] x2 = x[:, 0::2, 1::2, :] x3 = x[:, 1::2, 1::2, :] x = torch.cat([x0, x1, x2, x3], -1) x = x.view(B, -1, 4 * C) x = self.norm(x) x = self.reduction(x) return x def extra_repr(self) ->str: return f'input_resolution={self.input_resolution}, dim={self.dim}' def flops(self): H, W = self.input_resolution flops = H * W * self.dim flops += H // 2 * (W // 2) * 4 * self.dim * 2 * self.dim return flops def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_resolution': 4, 'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp46 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 * x0 + r1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (8 + 16 * x0 + (-4 + r1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 + 16 * x0 + (-8 + r1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (12 + 16 * x0 + (-12 + r1)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r1 + 16 * x0), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp43, xmask) tl.store(out_ptr2 + (r1 + 16 * x0), tmp49, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (8, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 16), (16, 16, 16, 1), torch.float32 ) buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf4 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0[grid(4)](buf4, primals_1, 
primals_2, primals_3, buf0, buf1, buf5, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 buf6 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (4, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6) return reinterpret_tensor(buf6, (4, 1, 8), (8, 8, 1), 0 ), buf0, buf1, buf4, reinterpret_tensor(buf5, (4, 16), (16, 1), 0 ), primals_4 class PatchMergingNew(nn.Module): """Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def extra_repr(self) ->str: return f'input_resolution={self.input_resolution}, dim={self.dim}' def flops(self): H, W = self.input_resolution flops = H * W * self.dim flops += H // 2 * (W // 2) * 4 * self.dim * 2 * self.dim return flops def forward(self, input_0): primals_4 = self.reduction.weight primals_2 = self.norm.weight primals_3 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
taokong/ibot
PatchMerging
false
16532
[ "Apache-2.0" ]
327
a2ee1ae7495d4ea8fb9ba100434c062f1bd3d1f0
https://github.com/taokong/ibot/tree/a2ee1ae7495d4ea8fb9ba100434c062f1bd3d1f0
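A minimal CPU usage sketch for the PatchMerging entry above, assuming the original class is in scope: tokens are reshaped to an H x W grid, 2x2 neighbourhoods are concatenated channel-wise (4C), layer-normalized, and linearly reduced to 2C, halving each spatial side. Passing input_resolution as a tuple is an assumption; forward() infers H from the sequence length and only extra_repr()/flops() read that attribute.

import torch

merge = PatchMerging(input_resolution=(8, 8), dim=16)
x = torch.rand(2, 64, 16)          # (B, H*W, C) with H = W = 8
out = merge(x)
assert out.shape == (2, 16, 32)    # (B, H/2 * W/2, 2 * C)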
ITN2D
import torch import torch.nn.functional as F import torch.nn as nn class ITN2D(nn.Module): def __init__(self, input_channels): super(ITN2D, self).__init__() use_bias = True self.conv11 = nn.Conv2d(input_channels, 2, kernel_size=3, padding=1, bias=use_bias) self.conv12 = nn.Conv2d(2, 4, kernel_size=3, padding=1, bias=use_bias) self.down1 = nn.Conv2d(4, 8, kernel_size=2, stride=2, bias=use_bias) self.conv21 = nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=use_bias) self.down2 = nn.Conv2d(8, 16, kernel_size=2, stride=2, bias=use_bias) self.conv31 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=use_bias ) self.up2 = nn.ConvTranspose2d(16, 8, kernel_size=2, stride=2, bias= use_bias) self.conv22 = nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=use_bias) self.up1 = nn.ConvTranspose2d(8, 4, kernel_size=2, stride=2, bias= use_bias) self.conv13 = nn.Conv2d(4, 2, kernel_size=3, padding=1, bias=use_bias) self.conv14 = nn.Conv2d(2, 2, kernel_size=3, padding=1, bias=use_bias) self.conv15 = nn.Conv2d(2, input_channels, kernel_size=3, padding=1, bias=use_bias) def forward(self, x): x1 = F.relu(self.conv11(x)) x1 = F.relu(self.conv12(x1)) x2 = self.down1(x1) x2 = F.relu(self.conv21(x2)) x3 = self.down2(x2) x3 = F.relu(self.conv31(x3)) x2 = self.up2(x3) + x2 x2 = F.relu(self.conv22(x2)) x1 = self.up1(x2) + x1 x1 = F.relu(self.conv13(x1)) x1 = F.relu(self.conv14(x1)) x = self.conv15(x1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (16, 8, 2, 2), (32, 4, 2, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_13, (16,), (1,)) assert_size_stride(primals_14, (16, 8, 2, 2), (32, 4, 2, 1)) assert_size_stride(primals_15, (8,), (1,)) assert_size_stride(primals_16, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_17, (8,), (1,)) assert_size_stride(primals_18, (8, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_21, (2,), (1,)) assert_size_stride(primals_22, (2, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_23, (2,), (1,)) assert_size_stride(primals_24, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_25, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 2, 2), (32, 4, 2, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(128)](buf5, primals_7, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 2, 2), (32, 4, 2, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(128)](buf7, primals_9, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 1, 1), (16, 1, 1, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(64)](buf9, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 1, 1), (16, 1, 1, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_5[grid(64)](buf11, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 8, 2, 2), (32, 4, 2, 1)) buf13 = buf12 del buf12 triton_poi_fused_add_convolution_6[grid(128)](buf13, primals_15, buf7, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 8, 2, 2), (32, 4, 2, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_3[grid(128)](buf15, primals_17, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_17 buf16 = extern_kernels.convolution(buf15, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1)) buf17 = buf16 del buf16 triton_poi_fused_add_convolution_7[grid(256)](buf17, primals_19, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 2, 4, 4), (32, 16, 4, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_0[grid(128)](buf19, primals_21, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 2, 4, 4), (32, 16, 4, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_0[grid(128)](buf21, primals_23, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_23 buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 4, 4, 4), (64, 16, 4, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_8[grid(256)](buf23, primals_25, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_25 return (buf23, primals_1, 
primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21) class ITN2DNew(nn.Module): def __init__(self, input_channels): super(ITN2DNew, self).__init__() use_bias = True self.conv11 = nn.Conv2d(input_channels, 2, kernel_size=3, padding=1, bias=use_bias) self.conv12 = nn.Conv2d(2, 4, kernel_size=3, padding=1, bias=use_bias) self.down1 = nn.Conv2d(4, 8, kernel_size=2, stride=2, bias=use_bias) self.conv21 = nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=use_bias) self.down2 = nn.Conv2d(8, 16, kernel_size=2, stride=2, bias=use_bias) self.conv31 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=use_bias ) self.up2 = nn.ConvTranspose2d(16, 8, kernel_size=2, stride=2, bias= use_bias) self.conv22 = nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=use_bias) self.up1 = nn.ConvTranspose2d(8, 4, kernel_size=2, stride=2, bias= use_bias) self.conv13 = nn.Conv2d(4, 2, kernel_size=3, padding=1, bias=use_bias) self.conv14 = nn.Conv2d(2, 2, kernel_size=3, padding=1, bias=use_bias) self.conv15 = nn.Conv2d(2, input_channels, kernel_size=3, padding=1, bias=use_bias) def forward(self, input_0): primals_1 = self.conv11.weight primals_2 = self.conv11.bias primals_4 = self.conv12.weight primals_5 = self.conv12.bias primals_6 = self.down1.weight primals_7 = self.down1.bias primals_8 = self.conv21.weight primals_9 = self.conv21.bias primals_10 = self.down2.weight primals_11 = self.down2.bias primals_12 = self.conv31.weight primals_13 = self.conv31.bias primals_14 = self.up2.weight primals_15 = self.up2.bias primals_16 = self.conv22.weight primals_17 = self.conv22.bias primals_18 = self.up1.weight primals_19 = self.up1.bias primals_20 = self.conv13.weight primals_21 = self.conv13.bias primals_22 = self.conv14.weight primals_23 = self.conv14.bias primals_24 = self.conv15.weight primals_25 = self.conv15.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0]
swaroopkml96/istn
ITN2D
false
16,533
[ "Apache-2.0" ]
91
600543e071aa56907509aa090697295cdc69a6b1
https://github.com/swaroopkml96/istn/tree/600543e071aa56907509aa090697295cdc69a6b1
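A minimal smoke test for the record above, included only as an illustration: it assumes the ITN2DNew class from the optimised code is in scope and that a CUDA device is available, since call() launches Triton kernels.

import torch

# Hypothetical smoke test (not part of the dataset record).
model = ITN2DNew(input_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # matches get_inputs() in the record
with torch.no_grad():
    y = model(x)
assert y.shape == x.shape  # the U-Net-like block maps back to the input shape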
Conv_Q
import torch import torch.nn.functional as F from torch.functional import F from torch import nn from typing import * from torch.nn import functional as F class Conv_Q(nn.Module): def __init__(self, frames, num_actions): super(Conv_Q, self).__init__() self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4) self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.q1 = nn.Linear(3136, 512) self.q2 = nn.Linear(512, 16) self.q3 = nn.Linear(16, num_actions) self.i1 = nn.Linear(3136, 512) self.i2 = nn.Linear(512, 16) self.i3 = nn.Linear(16, num_actions) def forward(self, state): c = F.relu(self.c1(state)) c = F.relu(self.c2(c)) c = F.relu(self.c3(c)) q = F.relu(self.q1(c.reshape(-1, 3136))) q = F.relu(self.q2(q)) q = self.q3(q) i = F.relu(self.i1(c.reshape(-1, 3136))) i = F.relu(self.i2(i)) i = self.i3(i) return q, F.log_softmax(i, dim=1), i def encode(self, state): with torch.no_grad(): c = F.relu(self.c1(state)) c = F.relu(self.c2(c)) c = F.relu(self.c3(c)) q = F.relu(self.q1(c.reshape(-1, 3136))) q = F.relu(self.q2(q)) i = F.relu(self.i1(c.reshape(-1, 3136))) i = F.relu(self.i2(i)) return i def get_inputs(): return [torch.rand([4, 4, 144, 144])] def get_init_inputs(): return [[], {'frames': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch.functional import F from torch import nn from typing import * from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1225 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (512, 3136), (3136, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (16, 512), (512, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (512, 3136), (3136, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (16, 512), (512, 1)) assert_size_stride(primals_17, (16,), (1,)) assert_size_stride(primals_18, (4, 16), (16, 1)) assert_size_stride(primals_19, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2, 156800, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = 
extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 14, 14), (12544, 196, 14, 1)) buf5 = buf4 del buf4 buf18 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(50176)]( buf5, primals_7, buf18, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0 ), reinterpret_tensor(primals_8, (3136, 512), (1, 3136), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_3[grid(8192)](buf7, primals_9, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (512, 16), ( 1, 512), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(256)](buf9, primals_11, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf9, reinterpret_tensor( primals_12, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf10) del primals_13 buf11 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0 ), reinterpret_tensor(primals_14, (3136, 512), (1, 3136), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_3[grid(8192)](buf12, primals_15, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_15 buf13 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_16, (512, 16), (1, 512), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_4[grid(256)](buf14, primals_17, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_17 buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_19, buf14, reinterpret_tensor( primals_18, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf15) del primals_19 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_5[grid(64)](buf15, buf16, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_6[grid(64)](buf16, buf17, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf16 return (buf10, buf17, buf15, primals_1, primals_3, primals_4, primals_6, buf1, buf3, reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0), buf7, buf9, buf12, buf14, buf17, primals_18, primals_16, primals_14, primals_12, primals_10, primals_8, buf18) class Conv_QNew(nn.Module): def __init__(self, frames, num_actions): super(Conv_QNew, self).__init__() self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4) self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.q1 = nn.Linear(3136, 512) self.q2 = nn.Linear(512, 16) self.q3 = nn.Linear(16, num_actions) self.i1 = nn.Linear(3136, 512) self.i2 = nn.Linear(512, 16) self.i3 = nn.Linear(16, num_actions) def encode(self, state): with torch.no_grad(): c = F.relu(self.c1(state)) c = F.relu(self.c2(c)) c = F.relu(self.c3(c)) q = F.relu(self.q1(c.reshape(-1, 3136))) q = F.relu(self.q2(q)) i = F.relu(self.i1(c.reshape(-1, 3136))) i = F.relu(self.i2(i)) return i def forward(self, input_0): primals_1 = self.c1.weight primals_2 = self.c1.bias primals_4 = self.c2.weight primals_5 = self.c2.bias primals_6 = 
self.c3.weight primals_7 = self.c3.bias primals_8 = self.q1.weight primals_9 = self.q1.bias primals_10 = self.q2.weight primals_11 = self.q2.bias primals_12 = self.q3.weight primals_13 = self.q3.bias primals_14 = self.i1.weight primals_15 = self.i1.bias primals_16 = self.i2.weight primals_17 = self.i2.bias primals_18 = self.i3.weight primals_19 = self.i3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0], output[1], output[2]
ssimonc/NeoRL
Conv_Q
false
16,534
[ "Apache-2.0" ]
50
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
https://github.com/ssimonc/NeoRL/tree/098c58c8e4c3e43e67803f6384619d3bfe7fce5d
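As an illustrative sanity check (not part of the record), the eager Conv_Q class above can be exercised on CPU; the output shapes and the log-softmax head follow directly from its definition.

import torch

# Assumes the eager Conv_Q class from the record is in scope; no Triton needed.
net = Conv_Q(frames=4, num_actions=4)
state = torch.rand(4, 4, 144, 144)          # matches get_inputs()
q, log_pi, i = net(state)
assert q.shape == (16, 4) and i.shape == (16, 4)
# the log_softmax output should exponentiate to a valid distribution per row
assert torch.allclose(log_pi.exp().sum(dim=1), torch.ones(16), atol=1e-5)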
Dense
from torch.autograd import Function from torch.nn import Module import torch from torch.nn import Parameter class DenseFunction(Function): @staticmethod def forward(ctx, input, weight, bias=None): output = input.mm(weight.t()) if bias is not None: output += bias.unsqueeze(0).expand_as(output) output = torch.sigmoid(output) ctx.save_for_backward(input, weight, bias, output) return output @staticmethod def backward(ctx, grad_output): input, weight, bias, output = ctx.saved_tensors grad_sigmoid = (1.0 - output) * output grad_output = grad_sigmoid * grad_output grad_input = grad_weight = grad_bias = None if ctx.needs_input_grad[0]: grad_input = grad_output.mm(weight) if ctx.needs_input_grad[1]: grad_weight = grad_output.t().mm(input) if bias is not None and ctx.needs_input_grad[2]: grad_bias = grad_output.sum(0).squeeze(0) return grad_input, grad_weight, grad_bias class Dense(Module): def __init__(self, input_features, output_features, bias=True): super(Dense, self).__init__() self.input_features = input_features self.output_features = output_features self.weight = Parameter(torch.Tensor(output_features, input_features)) if bias: self.bias = Parameter(torch.Tensor(output_features)) else: self.register_parameter('bias', None) self.weight.data.uniform_(-0.1, 0.1) if bias is not None: self.bias.data.uniform_(-0.1, 0.1) def forward(self, input): return DenseFunction.apply(input, self.weight, self.bias) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_features': 4, 'output_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function from torch.nn import Module from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_sigmoid_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf1, primals_3, buf1 class DenseFunction(Function): @staticmethod def forward(ctx, input, weight, bias=None): output = input.mm(weight.t()) if bias is not None: output += bias.unsqueeze(0).expand_as(output) output = torch.sigmoid(output) ctx.save_for_backward(input, weight, bias, output) return output @staticmethod def backward(ctx, grad_output): input, weight, bias, output = ctx.saved_tensors grad_sigmoid = (1.0 - output) * output grad_output = grad_sigmoid * grad_output grad_input = grad_weight = grad_bias = None if ctx.needs_input_grad[0]: grad_input = grad_output.mm(weight) if ctx.needs_input_grad[1]: grad_weight = grad_output.t().mm(input) if bias is not None and ctx.needs_input_grad[2]: grad_bias = grad_output.sum(0).squeeze(0) return grad_input, grad_weight, grad_bias class DenseNew(Module): def __init__(self, input_features, output_features, bias=True): super(DenseNew, self).__init__() self.input_features = input_features self.output_features = output_features self.weight = Parameter(torch.Tensor(output_features, input_features)) if bias: self.bias = Parameter(torch.Tensor(output_features)) else: self.register_parameter('bias', None) self.weight.data.uniform_(-0.1, 0.1) if bias is not None: self.bias.data.uniform_(-0.1, 0.1) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
tczhangzhi/pytorch-parallel
Dense
false
16,535
[ "MIT" ]
117
8d8baf80dd48234386051d0bab616de5b55f8f5c
https://github.com/tczhangzhi/pytorch-parallel/tree/8d8baf80dd48234386051d0bab616de5b55f8f5c
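One way to validate records like this one is to gradcheck the hand-written backward of the eager DenseFunction above; the sketch below is an illustration and assumes that class is in scope (double precision, as gradcheck expects).

import torch
from torch.autograd import gradcheck

# Numerically verify the custom sigmoid/linear backward of DenseFunction.
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
weight = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
bias = torch.rand(4, dtype=torch.double, requires_grad=True)
assert gradcheck(DenseFunction.apply, (inp, weight, bias), eps=1e-6, atol=1e-4)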
TripletLoss
import torch
from torch.nn.modules.distance import PairwiseDistance


class TripletLoss(torch.nn.Module):

    def __init__(self, margin):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.pdist = PairwiseDistance(2)

    def forward(self, anchor, positive, negative):
        pos_dist = self.pdist.forward(anchor, positive)
        neg_dist = self.pdist.forward(anchor, negative)
        hinge_dist = torch.clamp(self.margin + pos_dist - neg_dist, min=0.0)
        loss = torch.mean(hinge_dist)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'margin': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn.modules.distance import PairwiseDistance assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_mean_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 4.0 tmp26 = tmp24 + tmp25 tmp28 = tmp0 - tmp27 tmp29 = tmp28 + tmp3 tmp30 = tmp29 * tmp29 tmp32 = tmp6 - tmp31 tmp33 = tmp32 + tmp3 tmp34 = tmp33 * tmp33 tmp35 = tmp30 + tmp34 tmp37 = tmp12 - tmp36 tmp38 = tmp37 + tmp3 tmp39 = tmp38 * tmp38 tmp40 = tmp35 + tmp39 tmp42 = tmp18 - tmp41 tmp43 = tmp42 + tmp3 tmp44 = tmp43 * tmp43 tmp45 = tmp40 + tmp44 tmp46 = libdevice.sqrt(tmp45) tmp47 = tmp26 - tmp46 tmp48 = 0.0 tmp49 = triton_helpers.maximum(tmp47, tmp48) tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.sum(tmp50, 1)[:, None] tmp53 = 64.0 tmp54 = tmp52 / tmp53 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_clamp_mean_norm_sub_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class TripletLossNew(torch.nn.Module): def __init__(self, margin): super(TripletLossNew, self).__init__() self.margin = margin self.pdist = PairwiseDistance(2) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 
arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
tbmoon/facenet
TripletLoss
false
16,536
[ "MIT" ]
231
b3aec1a930f22a5a9597efa7072373c0ff93663f
https://github.com/tbmoon/facenet/tree/b3aec1a930f22a5a9597efa7072373c0ff93663f
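The fused kernel above folds the pairwise distances, hinge and mean into a single reduction; as an illustration, the same formula can be checked on CPU against the eager TripletLoss (the 1e-6 term comes from PairwiseDistance's default eps).

import torch

# Assumes the eager TripletLoss class from the record is in scope.
loss_fn = TripletLoss(margin=4)
a, p, n = (torch.rand(4, 4, 4, 4) for _ in range(3))
pos = (a - p + 1e-6).pow(2).sum(dim=-1).sqrt()
neg = (a - n + 1e-6).pow(2).sum(dim=-1).sqrt()
expected = torch.clamp(4 + pos - neg, min=0.0).mean()
assert torch.allclose(loss_fn(a, p, n), expected, atol=1e-6)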
ConcatBlock
import torch
import torch.nn as nn


class ConcatBlock(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(ConcatBlock, self).__init__()
        self.in_chns = in_channels
        self.out_chns = out_channels
        self.conv1 = nn.Conv2d(self.in_chns, self.in_chns, kernel_size=1,
            padding=0)
        self.conv2 = nn.Conv2d(self.in_chns, self.out_chns, kernel_size=1,
            padding=0)
        self.ac1 = nn.LeakyReLU()
        self.ac2 = nn.LeakyReLU()

    def forward(self, x):
        x = self.conv1(x)
        x = self.ac1(x)
        x = self.conv2(x)
        x = self.ac2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf3, primals_5, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 return buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4 class ConcatBlockNew(nn.Module): def __init__(self, in_channels, out_channels): super(ConcatBlockNew, self).__init__() self.in_chns = in_channels self.out_chns = out_channels self.conv1 = nn.Conv2d(self.in_chns, self.in_chns, kernel_size=1, padding=0) self.conv2 = nn.Conv2d(self.in_chns, self.out_chns, kernel_size=1, padding=0) self.ac1 = nn.LeakyReLU() self.ac2 = nn.LeakyReLU() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
tea321000/SSL4MIS
ConcatBlock
false
16,537
[ "MIT" ]
854
8d1b0be08cf089943481a47877b36eb6405fffb2
https://github.com/tea321000/SSL4MIS/tree/8d1b0be08cf089943481a47877b36eb6405fffb2
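The fused kernel above writes both the LeakyReLU mask (bool) and the activated values so the backward pass can reuse the mask; as an illustration, the eager ConcatBlock can be checked on CPU against the equivalent conv + leaky_relu composition with PyTorch's default 0.01 slope.

import torch
import torch.nn.functional as F

# Assumes the eager ConcatBlock class from the record is in scope.
block = ConcatBlock(in_channels=4, out_channels=4).eval()
x = torch.rand(4, 4, 4, 4)
with torch.no_grad():
    expected = F.leaky_relu(block.conv2(F.leaky_relu(block.conv1(x), 0.01)), 0.01)
    assert torch.allclose(block(x), expected)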
OutPutBlock
import torch
import torch.nn as nn


class OutPutBlock(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(OutPutBlock, self).__init__()
        self.in_chns = in_channels
        self.out_chns = out_channels
        self.conv1 = nn.Conv2d(self.in_chns, self.in_chns // 2,
            kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(self.in_chns // 2, self.out_chns,
            kernel_size=1, padding=0)
        self.drop1 = nn.Dropout2d(0.3)
        self.drop2 = nn.Dropout2d(0.3)
        self.ac1 = nn.LeakyReLU()

    def forward(self, x):
        x = self.drop1(x)
        x = self.conv1(x)
        x = self.ac1(x)
        x = self.drop2(x)
        x = self.conv2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 2 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1)) buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(128)](buf0, primals_3, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf4, primals_1, primals_2, primals_4, buf1, buf2 class OutPutBlockNew(nn.Module): def __init__(self, in_channels, out_channels): super(OutPutBlockNew, self).__init__() self.in_chns = in_channels self.out_chns = out_channels self.conv1 = nn.Conv2d(self.in_chns, self.in_chns // 2, kernel_size =1, padding=0) self.conv2 = nn.Conv2d(self.in_chns // 2, self.out_chns, kernel_size=1, padding=0) self.drop1 = nn.Dropout2d(0.3) self.drop2 = nn.Dropout2d(0.3) self.ac1 = nn.LeakyReLU() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
tea321000/SSL4MIS
OutPutBlock
false
16,538
[ "MIT" ]
854
8d1b0be08cf089943481a47877b36eb6405fffb2
https://github.com/tea321000/SSL4MIS/tree/8d1b0be08cf089943481a47877b36eb6405fffb2
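Note that call() in the optimised code above contains no dropout kernel, so the compiled module corresponds to inference-time behaviour; an illustrative CPU check against the eager OutPutBlock in eval() mode, where Dropout2d is a no-op (assumes that class is in scope).

import torch
import torch.nn.functional as F

block = OutPutBlock(in_channels=4, out_channels=4).eval()
x = torch.rand(4, 4, 4, 4)
with torch.no_grad():
    # conv1 halves channels to 2, LeakyReLU (slope 0.01), conv2 maps back to 4
    expected = block.conv2(F.leaky_relu(block.conv1(x), 0.01))
    assert torch.allclose(block(x), expected)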
MinimalRNNCell
import torch from torch import nn from functools import partial def get_initializer(name, activation): if activation in ['id', 'identity', 'linear', 'modrelu']: nonlinearity = 'linear' elif activation in ['relu', 'tanh', 'sigmoid']: nonlinearity = activation else: assert False, f'get_initializer: activation {activation} not supported' if name == 'uniform': initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity= nonlinearity) elif name == 'normal': initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity= nonlinearity) elif name == 'xavier': initializer = torch.nn.init.xavier_normal_ elif name == 'zero': initializer = partial(torch.nn.init.constant_, val=0) elif name == 'one': initializer = partial(torch.nn.init.constant_, val=1) else: assert False, f'get_initializer: initializer type {name} not supported' return initializer def Linear_(input_size, output_size, bias, init='normal', zero_bias_init= False, **kwargs): """ Returns a nn.Linear module with initialization options """ l = nn.Linear(input_size, output_size, bias=bias, **kwargs) get_initializer(init, 'linear')(l.weight) if bias and zero_bias_init: nn.init.zeros_(l.bias) return l def get_activation(activation, size): if activation == 'id': return nn.Identity() elif activation == 'tanh': return torch.tanh elif activation == 'relu': return torch.relu elif activation == 'sigmoid': return torch.sigmoid elif activation == 'modrelu': return Modrelu(size) else: raise NotImplementedError("hidden activation '{}' is not implemented" .format(activation)) class Gate(nn.Module): """ Implements gating mechanisms. Mechanisms: N - No gate G - Standard sigmoid gate """ def __init__(self, size, preact_ctor, preact_args, mechanism='N'): super().__init__() self.size = size self.mechanism = mechanism if self.mechanism == 'N': pass elif self.mechanism == 'G': self.W_g = preact_ctor(*preact_args) else: assert False, f'Gating type {self.mechanism} is not supported.' def forward(self, *inputs): if self.mechanism == 'N': return 1.0 if self.mechanism == 'G': g_preact = self.W_g(*inputs) g = torch.sigmoid(g_preact) return g class modrelu(nn.Module): def __init__(self, features): super(modrelu, self).__init__() self.features = features self.b = nn.Parameter(torch.Tensor(self.features)) self.reset_parameters() def reset_parameters(self): self.b.data.uniform_(-0.01, 0.01) def forward(self, inputs): norm = torch.abs(inputs) biased_norm = norm + self.b magnitude = nn.functional.relu(biased_norm) phase = torch.sign(inputs) return phase * magnitude class CellBase(nn.Module): """ Abstract class for our recurrent cell interface. 
Passes input through """ registry = {} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if hasattr(cls, 'name') and cls.name is not None: cls.registry[cls.name] = cls name = 'id' valid_keys = [] def default_initializers(self): return {} def default_architecture(self): return {} def __init__(self, input_size, hidden_size, initializers=None, architecture=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.architecture = self.default_architecture() self.initializers = self.default_initializers() if initializers is not None: self.initializers.update(initializers) None if architecture is not None: self.architecture.update(architecture) assert set(self.initializers.keys()).issubset(self.valid_keys) assert set(self.architecture.keys()).issubset(self.valid_keys) self.reset_parameters() def reset_parameters(self): pass def forward(self, input, hidden): return input, input def default_state(self, input, batch_size=None): return input.new_zeros(input.size(0) if batch_size is None else batch_size, self.hidden_size, requires_grad=False) def output(self, h): return h def state_size(self): return self.hidden_size def output_size(self): return self.hidden_size def initial_state(self, trainable=False): """ Return initial state of the RNN This should not need to see the input as it should be batch size agnostic and automatically broadcasted # TODO Currently not used """ if trainable: self.initial_state = torch.zeros(self.hidden_size, requires_grad=True) else: return torch.zeros(self.hidden_size, requires_grad=True) class Modrelu(modrelu): def reset_parameters(self): self.b.data.uniform_(-0.0, 0.0) class MinimalRNNCell(CellBase): name = 'mrnn' valid_keys = ['hx', 'bias'] def default_initializers(self): return {'hx': 'xavier'} def default_architecture(self): return {'bias': True} def __init__(self, input_size, hidden_size, hidden_activation='tanh', orthogonal=False, ortho_args=None, zero_bias_init=False, **kwargs): self.hidden_activation = hidden_activation self.zero_bias_init = zero_bias_init super().__init__(input_size, hidden_size, **kwargs) def reset_parameters(self): self.W_hx = Linear_(self.input_size, self.hidden_size, bias=self. architecture['bias'], zero_bias_init=self.zero_bias_init) get_initializer(self.initializers['hx'], self.hidden_activation)(self .W_hx.weight) self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size) preact_ctor = Linear_ preact_args = [self.input_size + self.hidden_size, self.hidden_size, self.architecture['bias']] self.W_g = Gate(self.hidden_size, preact_ctor, preact_args, mechanism='G') def forward(self, input, h): hidden_preact = self.W_hx(input) hidden = self.hidden_activation_fn(hidden_preact) hx = torch.cat((input, h), dim=-1) g = self.W_g(hx) h = (1.0 - g) * h + g * hidden return h, h def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from functools import partial assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp1 * tmp7 tmp9 = tmp5 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, primals_4, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf2, primals_4, buf0, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 8), (8, 1), 0), buf2 def get_initializer(name, activation): if activation in ['id', 'identity', 'linear', 'modrelu']: nonlinearity = 'linear' elif activation in ['relu', 'tanh', 'sigmoid']: nonlinearity = activation else: assert False, f'get_initializer: activation {activation} not supported' if name == 'uniform': initializer = 
partial(torch.nn.init.kaiming_uniform_, nonlinearity= nonlinearity) elif name == 'normal': initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity= nonlinearity) elif name == 'xavier': initializer = torch.nn.init.xavier_normal_ elif name == 'zero': initializer = partial(torch.nn.init.constant_, val=0) elif name == 'one': initializer = partial(torch.nn.init.constant_, val=1) else: assert False, f'get_initializer: initializer type {name} not supported' return initializer def Linear_(input_size, output_size, bias, init='normal', zero_bias_init= False, **kwargs): """ Returns a nn.Linear module with initialization options """ l = nn.Linear(input_size, output_size, bias=bias, **kwargs) get_initializer(init, 'linear')(l.weight) if bias and zero_bias_init: nn.init.zeros_(l.bias) return l def get_activation(activation, size): if activation == 'id': return nn.Identity() elif activation == 'tanh': return torch.tanh elif activation == 'relu': return torch.relu elif activation == 'sigmoid': return torch.sigmoid elif activation == 'modrelu': return Modrelu(size) else: raise NotImplementedError("hidden activation '{}' is not implemented" .format(activation)) class Gate(nn.Module): """ Implements gating mechanisms. Mechanisms: N - No gate G - Standard sigmoid gate """ def __init__(self, size, preact_ctor, preact_args, mechanism='N'): super().__init__() self.size = size self.mechanism = mechanism if self.mechanism == 'N': pass elif self.mechanism == 'G': self.W_g = preact_ctor(*preact_args) else: assert False, f'Gating type {self.mechanism} is not supported.' def forward(self, *inputs): if self.mechanism == 'N': return 1.0 if self.mechanism == 'G': g_preact = self.W_g(*inputs) g = torch.sigmoid(g_preact) return g class modrelu(nn.Module): def __init__(self, features): super(modrelu, self).__init__() self.features = features self.b = nn.Parameter(torch.Tensor(self.features)) self.reset_parameters() def reset_parameters(self): self.b.data.uniform_(-0.01, 0.01) def forward(self, inputs): norm = torch.abs(inputs) biased_norm = norm + self.b magnitude = nn.functional.relu(biased_norm) phase = torch.sign(inputs) return phase * magnitude class CellBase(nn.Module): """ Abstract class for our recurrent cell interface. 
Passes input through """ registry = {} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if hasattr(cls, 'name') and cls.name is not None: cls.registry[cls.name] = cls name = 'id' valid_keys = [] def default_initializers(self): return {} def default_architecture(self): return {} def __init__(self, input_size, hidden_size, initializers=None, architecture=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.architecture = self.default_architecture() self.initializers = self.default_initializers() if initializers is not None: self.initializers.update(initializers) None if architecture is not None: self.architecture.update(architecture) assert set(self.initializers.keys()).issubset(self.valid_keys) assert set(self.architecture.keys()).issubset(self.valid_keys) self.reset_parameters() def reset_parameters(self): pass def forward(self, input, hidden): return input, input def default_state(self, input, batch_size=None): return input.new_zeros(input.size(0) if batch_size is None else batch_size, self.hidden_size, requires_grad=False) def output(self, h): return h def state_size(self): return self.hidden_size def output_size(self): return self.hidden_size def initial_state(self, trainable=False): """ Return initial state of the RNN This should not need to see the input as it should be batch size agnostic and automatically broadcasted # TODO Currently not used """ if trainable: self.initial_state = torch.zeros(self.hidden_size, requires_grad=True) else: return torch.zeros(self.hidden_size, requires_grad=True) class Modrelu(modrelu): def reset_parameters(self): self.b.data.uniform_(-0.0, 0.0) class MinimalRNNCellNew(CellBase): name = 'mrnn' valid_keys = ['hx', 'bias'] def default_initializers(self): return {'hx': 'xavier'} def default_architecture(self): return {'bias': True} def __init__(self, input_size, hidden_size, hidden_activation='tanh', orthogonal=False, ortho_args=None, zero_bias_init=False, **kwargs): self.hidden_activation = hidden_activation self.zero_bias_init = zero_bias_init super().__init__(input_size, hidden_size, **kwargs) def reset_parameters(self): self.W_hx = Linear_(self.input_size, self.hidden_size, bias=self. architecture['bias'], zero_bias_init=self.zero_bias_init) get_initializer(self.initializers['hx'], self.hidden_activation)(self .W_hx.weight) self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size) preact_ctor = Linear_ preact_args = [self.input_size + self.hidden_size, self.hidden_size, self.architecture['bias']] self.W_g = Gate(self.hidden_size, preact_ctor, preact_args, mechanism='G') def forward(self, input_0, input_1): primals_1 = self.W_hx.weight primals_2 = self.W_hx.bias primals_5 = self.W_g.W_g.weight primals_6 = self.W_g.W_g.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
tarepan/HiPPO
MinimalRNNCell
false
16,539
[ "Apache-2.0" ]
57
bc23e2dba13da6c307cb5a4ae248c2d2c56d465f
https://github.com/tarepan/HiPPO/tree/bc23e2dba13da6c307cb5a4ae248c2d2c56d465f
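The fused add/mul/rsub/sigmoid/tanh kernel above implements the gated update h_new = (1 - g) * h + g * tanh(W_hx x) with g = sigmoid(W_g [x; h]); an illustrative CPU check of that equation using the eager cell (assumes MinimalRNNCell is in scope).

import torch

cell = MinimalRNNCell(input_size=4, hidden_size=4)
x, h = torch.rand(4, 4), torch.rand(4, 4)
h_new, _ = cell(x, h)
g = torch.sigmoid(cell.W_g.W_g(torch.cat((x, h), dim=-1)))
expected = (1.0 - g) * h + g * torch.tanh(cell.W_hx(x))
assert torch.allclose(h_new, expected, atol=1e-6)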
AvgPoolShortening
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class AvgPoolShortening(Module):
    """
    ### Average pool shortening

    This down-samples by a given factor with average pooling
    """

    def __init__(self, k: 'int'):
        """
        * `k` is the shortening factor
        """
        super().__init__()
        self.pool = nn.AvgPool1d(k, ceil_mode=True)

    def forward(self, x: 'torch.Tensor'):
        """
        * `x` is of shape `[seq_len, batch_size, d_model]`
        """
        return self.pool(x.permute(1, 2, 0)).permute(2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'k': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (1, 4, 4), (1, 4, 1), 0), class AvgPoolShorteningNew(Module): """ ### Average pool shortening This down-samples by a given factor with average pooling """ def __init__(self, k: 'int'): """ * `k` is the shortening factor """ super().__init__() self.pool = nn.AvgPool1d(k, ceil_mode=True) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
AvgPoolShortening
false
16,540
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
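With k equal to the sequence length, the avg-pool kernel above reduces to a plain mean over time, which the eager module reproduces; an illustrative CPU check (assumes AvgPoolShortening is in scope).

import torch

pool = AvgPoolShortening(k=4)
x = torch.rand(4, 4, 4)                 # [seq_len=4, batch=4, d_model=4]
y = pool(x)
assert y.shape == (1, 4, 4)
assert torch.allclose(y[0], x.mean(dim=0))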
MLPAutoencoder
import torch def choose_nonlinearity(name): nl = None if name == 'tanh': nl = torch.tanh elif name == 'relu': nl = torch.relu elif name == 'sigmoid': nl = torch.sigmoid elif name == 'softplus': nl = torch.nn.functional.softplus elif name == 'selu': nl = torch.nn.functional.selu elif name == 'elu': nl = torch.nn.functional.elu elif name == 'swish': def nl(x): return x * torch.sigmoid(x) else: raise ValueError('nonlinearity not recognized') return nl class MLPAutoencoder(torch.nn.Module): """A salt-of-the-earth MLP Autoencoder + some edgy res connections""" def __init__(self, input_dim, hidden_dim, latent_dim, nonlinearity='tanh'): super(MLPAutoencoder, self).__init__() self.linear1 = torch.nn.Linear(input_dim, hidden_dim) self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear3 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear4 = torch.nn.Linear(hidden_dim, latent_dim) self.linear5 = torch.nn.Linear(latent_dim, hidden_dim) self.linear6 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear7 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear8 = torch.nn.Linear(hidden_dim, input_dim) for l in [self.linear1, self.linear2, self.linear3, self.linear4, self.linear5, self.linear6, self.linear7, self.linear8]: torch.nn.init.orthogonal_(l.weight) self.nonlinearity = choose_nonlinearity(nonlinearity) def encode(self, x): h = self.nonlinearity(self.linear1(x)) h = h + self.nonlinearity(self.linear2(h)) h = h + self.nonlinearity(self.linear3(h)) return self.linear4(h) def decode(self, z): h = self.nonlinearity(self.linear5(z)) h = h + self.nonlinearity(self.linear6(h)) h = h + self.nonlinearity(self.linear7(h)) return self.linear8(h) def forward(self, x): z = self.encode(x) x_hat = self.decode(z) return x_hat def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'hidden_dim': 4, 'latent_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_tanh_1[grid(256)](buf1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_tanh_1[grid(256)](buf3, buf4, buf5, 256, XBLOCK=128, 
num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused_tanh_0[grid(256)](buf8, primals_11, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_13 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_tanh_1[grid(256)](buf8, buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_15 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_tanh_1[grid(256)](buf10, buf11, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_17 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), buf6, buf8, buf9, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), buf11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_16, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4) def choose_nonlinearity(name): nl = None if name == 'tanh': nl = torch.tanh elif name == 'relu': nl = torch.relu elif name == 'sigmoid': nl = torch.sigmoid elif name == 'softplus': nl = torch.nn.functional.softplus elif name == 'selu': nl = torch.nn.functional.selu elif name == 'elu': nl = torch.nn.functional.elu elif name == 'swish': def nl(x): return x * torch.sigmoid(x) else: raise ValueError('nonlinearity not recognized') return nl class MLPAutoencoderNew(torch.nn.Module): """A salt-of-the-earth MLP Autoencoder + some edgy res connections""" def __init__(self, input_dim, hidden_dim, latent_dim, nonlinearity='tanh'): super(MLPAutoencoderNew, self).__init__() self.linear1 = torch.nn.Linear(input_dim, hidden_dim) self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear3 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear4 = torch.nn.Linear(hidden_dim, latent_dim) self.linear5 = torch.nn.Linear(latent_dim, hidden_dim) self.linear6 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear7 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear8 = torch.nn.Linear(hidden_dim, input_dim) for l in [self.linear1, self.linear2, self.linear3, self.linear4, self.linear5, self.linear6, self.linear7, self.linear8]: torch.nn.init.orthogonal_(l.weight) self.nonlinearity = choose_nonlinearity(nonlinearity) def encode(self, x): h = self.nonlinearity(self.linear1(x)) h = h + self.nonlinearity(self.linear2(h)) h = h + self.nonlinearity(self.linear3(h)) return self.linear4(h) def 
decode(self, z): h = self.nonlinearity(self.linear5(z)) h = h + self.nonlinearity(self.linear6(h)) h = h + self.nonlinearity(self.linear7(h)) return self.linear8(h) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_8 = self.linear4.weight primals_9 = self.linear4.bias primals_10 = self.linear5.weight primals_11 = self.linear5.bias primals_12 = self.linear6.weight primals_13 = self.linear6.bias primals_14 = self.linear7.weight primals_15 = self.linear7.bias primals_16 = self.linear8.weight primals_17 = self.linear8.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
tailintalent/hamiltonian-nn
MLPAutoencoder
false
16,541
[ "Apache-2.0" ]
293
1f6dd2d58ab84977a30584f0d1dd7f8b234e4049
https://github.com/tailintalent/hamiltonian-nn/tree/1f6dd2d58ab84977a30584f0d1dd7f8b234e4049
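A minimal usage sketch for the record above, assuming the classes and helper functions defined in it are in scope; the CUDA guard is an assumption, since the Triton-compiled MLPAutoencoderNew variant only runs on a GPU.

# Illustrative usage sketch, not part of the original record.
import torch

args, kwargs = get_init_inputs()     # [], {'input_dim': 4, 'hidden_dim': 4, 'latent_dim': 4}
model = MLPAutoencoder(*args, **kwargs)
x, = get_inputs()                    # random [4, 4, 4, 4] tensor
x_hat = model(x)                     # reconstruction, same shape as x
print(x_hat.shape)

if torch.cuda.is_available():        # the generated call() targets CUDA device 0
    model_new = MLPAutoencoderNew(*args, **kwargs).cuda()
    y = model_new(x.cuda())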
ClippedValueFunctionLoss
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd


class ClippedValueFunctionLoss(Module):
    """
    ## Clipped Value Function Loss

    Similarly we clip the value function update also.

    \\begin{align}
    V^{\\pi_\\theta}_{CLIP}(s_t)
     &= clip\\Bigl(V^{\\pi_\\theta}(s_t) - \\hat{V_t}, -\\epsilon, +\\epsilon\\Bigr)
    \\
    \\mathcal{L}^{VF}(\\theta)
     &= \\frac{1}{2} \\mathbb{E} \\biggl[
        max\\Bigl(\\bigl(V^{\\pi_\\theta}(s_t) - R_t\\bigr)^2,
                  \\bigl(V^{\\pi_\\theta}_{CLIP}(s_t) - R_t\\bigr)^2\\Bigr)
     \\biggr]
    \\end{align}

    Clipping makes sure the value function $V_\\theta$ doesn't deviate
    significantly from $V_{\\theta_{OLD}}$.
    """

    def forward(self, value: 'torch.Tensor', sampled_value: 'torch.Tensor',
                sampled_return: 'torch.Tensor', clip: 'float'):
        clipped_value = sampled_value + (value - sampled_value).clamp(min=-clip,
            max=clip)
        vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value -
            sampled_return) ** 2)
        return 0.5 * vf_loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp6 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp0 - tmp4 tmp7 = -tmp6 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp9 = triton_helpers.minimum(tmp8, tmp6) tmp10 = tmp4 + tmp9 tmp11 = tmp10 - tmp1 tmp12 = tmp11 * tmp11 tmp13 = triton_helpers.maximum(tmp3, tmp12) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tmp19 = 0.5 tmp20 = tmp18 * tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0[grid(1)](buf1 , arg0_1, arg3_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1 ) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf1, class ClippedValueFunctionLossNew(Module): """ ## Clipped Value Function Loss Similarly we clip the value function update also. egin{align} V^{\\pi_ heta}_{CLIP}(s_t) &= clip\\Bigl(V^{\\pi_ heta}(s_t) - \\hat{V_t}, -\\epsilon, +\\epsilon\\Bigr) \\ \\mathcal{L}^{VF}( heta) &= rac{1}{2} \\mathbb{E} iggl[ max\\Bigl(igl(V^{\\pi_ heta}(s_t) - R_tigr)^2, igl(V^{\\pi_ heta}_{CLIP}(s_t) - R_tigr)^2\\Bigr) iggr] \\end{align} Clipping makes sure the value function $V_ heta$ doesn't deviate significantly from $V_{ heta_{OLD}}$. """ def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
ClippedValueFunctionLoss
false
16,542
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
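A short usage sketch for the loss above, assuming flat value/return tensors from a rollout buffer; the clip value 0.2 and the batch size are illustrative choices, not taken from the record.

# Illustrative usage sketch; clip=0.2 is an arbitrary example value.
import torch

loss_fn = ClippedValueFunctionLoss()
value = torch.rand(8)              # V(s_t) from the current value head
sampled_value = torch.rand(8)      # V(s_t) recorded when the rollout was sampled
sampled_return = torch.rand(8)     # empirical returns R_t
loss = loss_fn(value, sampled_value, sampled_return, clip=0.2)
print(loss.item())                 # non-negative scalar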
Loss
import torch
import torch.nn.functional as F
from torch import nn


def _iou(pred, target):
    b = pred.shape[0]
    IoU = 0.0
    for i in range(0, b):
        Iand1 = torch.sum(target[i, :, :] * pred[i, :, :])
        Ior1 = torch.sum(target[i, :, :]) + torch.sum(pred[i, :, :]) - Iand1
        IoU1 = Iand1 / Ior1
        IoU = IoU + (1 - IoU1)
    return IoU / b


class IOU(torch.nn.Module):

    def __init__(self):
        super(IOU, self).__init__()

    def forward(self, pred, target):
        return _iou(pred, target)


class Weighed_Bce_Loss(nn.Module):

    def __init__(self):
        super(Weighed_Bce_Loss, self).__init__()

    def forward(self, x, label):
        x = x.view(-1, 1, x.shape[1], x.shape[2])
        label = label.view(-1, 1, label.shape[1], label.shape[2])
        label_t = (label == 1).float()
        label_f = (label == 0).float()
        p = torch.sum(label_t) / (torch.sum(label_t) + torch.sum(label_f))
        w = torch.zeros_like(label)
        w[label == 1] = p
        w[label == 0] = 1 - p
        loss = F.binary_cross_entropy(x, label, weight=w)
        return loss


class Cls_Loss(nn.Module):

    def __init__(self):
        super(Cls_Loss, self).__init__()

    def forward(self, x, label):
        loss = F.binary_cross_entropy(x, label)
        return loss


class S_Loss(nn.Module):

    def __init__(self):
        super(S_Loss, self).__init__()

    def forward(self, x, label):
        loss = F.smooth_l1_loss(x, label)
        return loss


class Loss(nn.Module):

    def __init__(self):
        super(Loss, self).__init__()
        self.loss_wbce = Weighed_Bce_Loss()
        self.loss_cls = Cls_Loss()
        self.loss_s = S_Loss()
        self.loss_i = IOU()
        self.w_wbce = 1
        self.w_cls = 1
        self.w_smooth = 1
        self.w_iou = 1

    def forward(self, x, label, x_cls, label_cls):
        m_loss = self.loss_wbce(x, label) * self.w_wbce
        c_loss = self.loss_cls(x_cls, label_cls) * self.w_cls
        s_loss = self.loss_s(x, label) * self.w_smooth
        iou_loss = self.loss_i(x, label) * self.w_iou
        loss = m_loss + c_loss + s_loss + iou_loss
        return loss, m_loss, c_loss, s_loss, iou_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_ptr1 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp0 * tmp4 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (64 + r0), None) tmp4 = tl.load(in_ptr1 + (64 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp0 * tmp4 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) @triton.jit def triton_per_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (128 + r0), None) tmp4 = tl.load(in_ptr1 + (128 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp0 * tmp4 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) @triton.jit def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, 
tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (192 + r0), None) tmp4 = tl.load(in_ptr1 + (192 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp0 * tmp4 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) @triton.jit def triton_per_fused__to_copy_add_binary_cross_entropy_div_eq_index_put_mul_rsub_smooth_l1_loss_sub_sum_zeros_like_4( in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, out_ptr3, xnumel, rnumel ): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp16 = tl.load(in_ptr2 + r0, None) tmp34 = tl.load(in_ptr3 + r0, None) tmp66 = tl.load(in_ptr4 + 0) tmp67 = tl.broadcast_to(tmp66, [1]) tmp68 = tl.load(in_out_ptr4 + 0) tmp69 = tl.broadcast_to(tmp68, [1]) tmp70 = tl.load(in_ptr5 + 0) tmp71 = tl.broadcast_to(tmp70, [1]) tmp77 = tl.load(in_ptr6 + 0) tmp78 = tl.broadcast_to(tmp77, [1]) tmp79 = tl.load(in_ptr7 + 0) tmp80 = tl.broadcast_to(tmp79, [1]) tmp81 = tl.load(in_ptr8 + 0) tmp82 = tl.broadcast_to(tmp81, [1]) tmp88 = tl.load(in_ptr9 + 0) tmp89 = tl.broadcast_to(tmp88, [1]) tmp90 = tl.load(in_ptr10 + 0) tmp91 = tl.broadcast_to(tmp90, [1]) tmp92 = tl.load(in_ptr11 + 0) tmp93 = tl.broadcast_to(tmp92, [1]) tmp99 = tl.load(in_ptr12 + 0) tmp100 = tl.broadcast_to(tmp99, [1]) tmp101 = tl.load(in_ptr13 + 0) tmp102 = tl.broadcast_to(tmp101, [1]) tmp103 = tl.load(in_ptr14 + 0) tmp104 = tl.broadcast_to(tmp103, [1]) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp17 = tmp16 == tmp1 tmp18 = tmp17.to(tl.float32) tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 0.0 tmp23 = tmp16 == tmp22 tmp24 = tmp23.to(tl.float32) tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = tmp21 + tmp27 tmp29 = tmp21 / tmp28 tmp30 = tl.where(tmp17, tmp29, tmp22) tmp31 = tmp1 - tmp29 tmp32 = tl.where(tmp23, tmp31, tmp30) tmp33 = tmp16 - tmp1 tmp35 = -tmp34 tmp36 = libdevice.log1p(tmp35) tmp37 = triton_helpers.maximum(tmp36, tmp6) tmp38 = tmp33 * tmp37 tmp39 = tl_math.log(tmp34) tmp40 = triton_helpers.maximum(tmp39, tmp6) tmp41 = tmp16 * tmp40 tmp42 = tmp38 - tmp41 tmp43 = tmp42 * tmp32 tmp44 = tl.broadcast_to(tmp43, [RBLOCK]) tmp46 = triton_helpers.promote_to_tensor(tl.sum(tmp44, 0)) tmp47 = tmp34 - tmp16 tmp48 = tl_math.abs(tmp47) tmp49 = tmp48 < tmp1 tmp50 = tmp48 * tmp48 tmp51 = 0.5 tmp52 = tmp50 * tmp51 tmp53 = tmp52 * tmp1 tmp54 = tmp48 - tmp51 tmp55 = tl.where(tmp49, tmp53, tmp54) tmp56 = tl.broadcast_to(tmp55, [RBLOCK]) tmp58 = 
triton_helpers.promote_to_tensor(tl.sum(tmp56, 0)) tmp59 = 256.0 tmp60 = tmp46 / tmp59 tmp61 = tmp60 * tmp1 tmp62 = tmp15 / tmp59 tmp63 = tmp62 * tmp1 tmp64 = tmp58 / tmp59 tmp65 = tmp64 * tmp1 tmp72 = tmp69 + tmp71 tmp73 = tmp72 - tmp67 tmp74 = tmp67 / tmp73 tmp75 = tmp1 - tmp74 tmp76 = tmp75 + tmp22 tmp83 = tmp80 + tmp82 tmp84 = tmp83 - tmp78 tmp85 = tmp78 / tmp84 tmp86 = tmp1 - tmp85 tmp87 = tmp76 + tmp86 tmp94 = tmp91 + tmp93 tmp95 = tmp94 - tmp89 tmp96 = tmp89 / tmp95 tmp97 = tmp1 - tmp96 tmp98 = tmp87 + tmp97 tmp105 = tmp102 + tmp104 tmp106 = tmp105 - tmp100 tmp107 = tmp100 / tmp106 tmp108 = tmp1 - tmp107 tmp109 = tmp98 + tmp108 tmp110 = 0.25 tmp111 = tmp109 * tmp110 tmp112 = tmp111 * tmp1 tmp113 = tmp61 + tmp63 tmp114 = tmp113 + tmp65 tmp115 = tmp114 + tmp112 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp61, None) tl.debug_barrier() tl.store(in_out_ptr2 + tl.full([1], 0, tl.int32), tmp63, None) tl.debug_barrier() tl.store(in_out_ptr3 + tl.full([1], 0, tl.int32), tmp65, None) tl.debug_barrier() tl.store(in_out_ptr4 + tl.full([1], 0, tl.int32), tmp112, None) tl.store(out_ptr3 + tl.full([1], 0, tl.int32), tmp115, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf11 = empty_strided_cuda((), (), torch.float32) buf12 = empty_strided_cuda((), (), torch.float32) buf13 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(1)](arg1_1, arg0_1, buf11, buf12, buf13, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf14 = empty_strided_cuda((), (), torch.float32) buf15 = empty_strided_cuda((), (), torch.float32) buf16 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mul_sum_1[grid(1)](arg1_1, arg0_1, buf14, buf15, buf16, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf17 = empty_strided_cuda((), (), torch.float32) buf18 = empty_strided_cuda((), (), torch.float32) buf19 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mul_sum_2[grid(1)](arg1_1, arg0_1, buf17, buf18, buf19, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf20 = empty_strided_cuda((), (), torch.float32) buf21 = empty_strided_cuda((), (), torch.float32) buf22 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mul_sum_3[grid(1)](arg1_1, arg0_1, buf20, buf21, buf22, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) buf5 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 buf8 = buf7 del buf7 buf10 = buf9 del buf9 buf23 = buf11 del buf11 buf24 = buf23 del buf23 buf25 = empty_strided_cuda((), (), torch.float32) triton_per_fused__to_copy_add_binary_cross_entropy_div_eq_index_put_mul_rsub_smooth_l1_loss_sub_sum_zeros_like_4[ grid(1)](buf6, buf8, buf10, buf24, arg2_1, arg3_1, arg1_1, arg0_1, buf13, buf12, buf16, buf14, buf15, buf19, buf17, buf18, buf22, buf20, buf21, buf25, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del buf12 del buf13 del buf14 del buf15 del buf16 del buf17 del buf18 del buf19 del buf20 del buf21 del buf22 return buf25, buf6, buf8, buf10, buf24 def _iou(pred, target): b = pred.shape[0] IoU = 0.0 for i in range(0, b): Iand1 = torch.sum(target[i, :, :] * pred[i, :, :]) Ior1 = 
torch.sum(target[i, :, :]) + torch.sum(pred[i, :, :]) - Iand1 IoU1 = Iand1 / Ior1 IoU = IoU + (1 - IoU1) return IoU / b class IOU(torch.nn.Module): def __init__(self): super(IOU, self).__init__() def forward(self, pred, target): return _iou(pred, target) class Weighed_Bce_Loss(nn.Module): def __init__(self): super(Weighed_Bce_Loss, self).__init__() def forward(self, x, label): x = x.view(-1, 1, x.shape[1], x.shape[2]) label = label.view(-1, 1, label.shape[1], label.shape[2]) label_t = (label == 1).float() label_f = (label == 0).float() p = torch.sum(label_t) / (torch.sum(label_t) + torch.sum(label_f)) w = torch.zeros_like(label) w[label == 1] = p w[label == 0] = 1 - p loss = F.binary_cross_entropy(x, label, weight=w) return loss class Cls_Loss(nn.Module): def __init__(self): super(Cls_Loss, self).__init__() def forward(self, x, label): loss = F.binary_cross_entropy(x, label) return loss class S_Loss(nn.Module): def __init__(self): super(S_Loss, self).__init__() def forward(self, x, label): loss = F.smooth_l1_loss(x, label) return loss class LossNew(nn.Module): def __init__(self): super(LossNew, self).__init__() self.loss_wbce = Weighed_Bce_Loss() self.loss_cls = Cls_Loss() self.loss_s = S_Loss() self.loss_i = IOU() self.w_wbce = 1 self.w_cls = 1 self.w_smooth = 1 self.w_iou = 1 def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1], output[2], output[3], output[4]
suyukun666/UFO
Loss
false
16,543
[ "MIT" ]
122
e57016948b03cd2f75155d2958cea69b6e4b56f8
https://github.com/suyukun666/UFO/tree/e57016948b03cd2f75155d2958cea69b6e4b56f8
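A hedged usage sketch for the combined loss above; the 64x64 map size, the 10-class head, and the binary masks are example assumptions chosen so every term (weighted BCE, classification BCE, smooth L1, IoU) is well defined.

# Illustrative usage sketch; shapes and binary targets are example choices.
import torch

criterion = Loss()
pred = torch.rand(4, 64, 64)                     # predicted saliency maps in [0, 1]
mask = (torch.rand(4, 64, 64) > 0.5).float()     # binary ground-truth masks
pred_cls = torch.rand(4, 10)                     # predicted class probabilities
label_cls = (torch.rand(4, 10) > 0.5).float()    # multi-label targets
loss, m_loss, c_loss, s_loss, iou_loss = criterion(pred, mask, pred_cls, label_cls)
print(loss.item(), m_loss.item(), iou_loss.item())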
DPFP
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class DPFP(Module):
    """
    ## Deterministic Parameter Free Project (DPFP)

    This is the new projection function $\\textcolor{lightgreen}{\\phi}$ introduced in the paper.
    DPFP projects $k$ of dimensionality $d_{key}$ to dimensionality
    $d_{dot} = 2 d_{key} \\nu$, where $\\nu \\in \\{1, 2, ..., 2 d_{key} - 1 \\}$ is a hyper-parameter.

    $$\\textcolor{lightgreen}{\\phi_{2 d_{key} (i - 1)  + j}(k)}
     = \\text{ReLU}\\Big(\\big[k, -k\\big]\\Big)_{j}
       \\text{ReLU}\\Big(\\big[k, -k\\big]\\Big)_{i + j}$$

    where $\\big[k, -k\\big]$ is the concatenation of $k$ and $-k$ to give a vector of
    size $2 d_{key}$, $i \\in \\{1, 2, ..., \\nu \\}$, and $j \\in \\{1, 2, ..., 2 d_{key}\\}$.
    $x_i$ is the $i$-th element of vector $x$ and is rolled around if
    $i$ is larger than the number of elements in $x$.

    Basically, it creates a new vector by multiplying elements of $[k, -k]$ shifted by $i$.

    This produces projections that are sparse (only a few elements of $phi$ are non-zero) and
    orthogonal ($\\textcolor{lightgreen}{\\phi(k^{(i)})} \\cdot \\textcolor{lightgreen}{\\phi(k^{(j)})}
     \\approx 0$ for most $i, j$
    unless $k^{(i)}$ and $k^{(j)}$ are very similar.

    ### Normalization

    Paper introduces a simple normalization for $\\textcolor{lightgreen}{\\phi}$,

    $$\\textcolor{lightgreen}{\\phi '(k)} =
     \\frac{\\textcolor{lightgreen}{\\phi(k)}}{\\sum^{d_{dot}}_{j=1} \\textcolor{lightgreen}{\\phi(k)_j}}$$

    *Check the paper for derivation.*
    """

    def __init__(self, nu: 'int'=1, eps: 'float'=1e-06):
        """
        * `nu` is the hyper-parameter $\\nu$.
        * `eps` is the small value used to make sure there is no division-by-zero when normalizing.
        """
        super().__init__()
        self.nu = nu
        self.relu = nn.ReLU()
        self.eps = eps

    def forward(self, k: 'torch.Tensor'):
        k = self.dpfp(k)
        return k / (torch.sum(k, dim=-1, keepdim=True) + self.eps)

    def dpfp(self, k: 'torch.Tensor'):
        """
        $$\\textcolor{lightgreen}{\\phi(k)}$$
        """
        x = self.relu(torch.cat([k, -k], dim=-1))
        x_rolled = [x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)]
        x_rolled = torch.cat(x_rolled, dim=-1)
        x_repeat = torch.cat([x] * self.nu, dim=-1)
        return x_repeat * x_rolled


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_cat_div_mul_relu_roll_sum_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + r1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * x0 + (-4 + r1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp14 = tl.full([1, 1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = (7 + r1) % 8 tmp18 = tmp16 < tmp3 tmp19 = tl.load(in_ptr0 + (4 * x0 + (7 + r1) % 8), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp16 >= tmp3 tmp22 = tl.load(in_ptr0 + (4 * x0 + (-4 + (7 + r1) % 8)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = -tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp20, tmp23, tmp24) tmp26 = tl.where(tmp18, tmp19, tmp25) tmp27 = triton_helpers.maximum(tmp14, tmp26) tmp28 = tmp15 * tmp27 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = 1e-06 tmp34 = tmp32 + tmp33 tmp35 = tmp28 / tmp34 tl.store(out_ptr2 + (r1 + 8 * x0), tmp35, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_cat_div_mul_relu_roll_sum_0[grid(64)](arg0_1, buf2, 64, 8, XBLOCK=32, num_warps=2, num_stages=1) del arg0_1 return buf2, class DPFPNew(Module): """ ## Deterministic Parameter Free Project (DPFP) This is the new projection function $ extcolor{lightgreen}{\\phi}$ introduced in the paper. DPFP projects $k$ of dimensionality $d_{key}$ to dimensionality $d_{dot} = 2 d_{key} u$, where $ u \\in \\{1, 2, ..., 2 d_{key} - 1 \\}$ is a hyper-parameter. $$ extcolor{lightgreen}{\\phi_{2 d_{key} (i - 1) + j}(k)} = ext{ReLU}\\Big(ig[k, -kig]\\Big)_{j} ext{ReLU}\\Big(ig[k, -kig]\\Big)_{i + j}$$ where $ig[k, -kig]$ is the concatenation of $k$ and $-k$ to give a vector of size $2 d_{key}$, $i \\in \\{1, 2, ..., u \\}$, and $j \\in \\{1, 2, ..., 2 d_{key}\\}$. $x_i$ is the $i$-th element of vector $x$ and is rolled around if $i$ is larger than the number of elements in $x$. Basically, it creates a new vector by multiplying elements of $[k, -k]$ shifted by $i$. 
This produces projections that are sparse (only a few elements of $phi$ are non-zero) and orthogonal ($ extcolor{lightgreen}{\\phi(k^{(i)})} \\cdot extcolor{lightgreen}{\\phi(k^{(j)})} pprox 0$ for most $i, j$ unless $k^{(i)}$ and $k^{(j)}$ are very similar. ### Normalization Paper introduces a simple normalization for $ extcolor{lightgreen}{\\phi}$, $$ extcolor{lightgreen}{\\phi '(k)} = rac{ extcolor{lightgreen}{\\phi(k)}}{\\sum^{d_{dot}}_{j=1} extcolor{lightgreen}{\\phi(k)_j}}$$ *Check the paper for derivation.* """ def __init__(self, nu: 'int'=1, eps: 'float'=1e-06): """ * `nu` is the hyper-parameter $ u$. * `eps` is the small value used to make sure there is no division-by-zero when normalizing. """ super().__init__() self.nu = nu self.relu = nn.ReLU() self.eps = eps def dpfp(self, k: 'torch.Tensor'): """ $$ extcolor{lightgreen}{\\phi(k)}$$ """ x = self.relu(torch.cat([k, -k], dim=-1)) x_rolled = [x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)] x_rolled = torch.cat(x_rolled, dim=-1) x_repeat = torch.cat([x] * self.nu, dim=-1) return x_repeat * x_rolled def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
DPFP
false
16,544
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
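A brief usage sketch for DPFP; nu=2 and the [batch, seq, d_key] key shape are illustrative assumptions. The projected dimension is 2 * d_key * nu, and each projected vector sums to roughly 1 after normalization.

# Illustrative usage sketch; nu and the key shape are example choices.
import torch

dpfp = DPFP(nu=2)
k = torch.rand(4, 16, 32)            # [batch, seq, d_key]
phi = dpfp(k)                        # [batch, seq, 2 * 32 * 2] = [4, 16, 128]
print(phi.shape, phi.sum(dim=-1)[0, 0].item())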
DiscriminatorLoss
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.utils.data
import torch.nn.functional
import torch.autograd


class DiscriminatorLoss(Module):
    """
    ## Discriminator Loss

    We want to find $w$ to maximize
    $$\\mathbb{E}_{x \\sim \\mathbb{P}_r} [f_w(x)]- \\mathbb{E}_{z \\sim p(z)} [f_w(g_\\theta(z))]$$,
    so we minimize,
    $$-\\frac{1}{m} \\sum_{i=1}^m f_w \\big(x^{(i)} \\big) +
     \\frac{1}{m} \\sum_{i=1}^m f_w \\big( g_\\theta(z^{(i)}) \\big)$$
    """

    def forward(self, f_real: 'torch.Tensor', f_fake: 'torch.Tensor'):
        """
        * `f_real` is $f_w(x)$
        * `f_fake` is $f_w(g_\\theta(z))$

        This returns a tuple with losses for $f_w(x)$ and $f_w(g_\\theta(z))$,
        which are later added.
        They are kept separate for logging.
        """
        return F.relu(1 - f_real).mean(), F.relu(1 + f_fake).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_relu_rsub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) @triton.jit def triton_per_fused_add_mean_relu_1(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_relu_rsub_0[grid(1)](buf2, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 triton_per_fused_add_mean_relu_1[grid(1)](buf3, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 return buf2, buf3 class DiscriminatorLossNew(Module): """ ## Discriminator Loss We want to find $w$ to maximize $$\\mathbb{E}_{x \\sim \\mathbb{P}_r} [f_w(x)]- \\mathbb{E}_{z \\sim p(z)} [f_w(g_ heta(z))]$$, so we minimize, $$- rac{1}{m} \\sum_{i=1}^m f_w ig(x^{(i)} ig) + rac{1}{m} \\sum_{i=1}^m f_w ig( g_ heta(z^{(i)}) ig)$$ """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
techthiyanes/annotated_deep_learning_paper_implementations
DiscriminatorLoss
false
16,545
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
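A brief usage sketch; the critic scores below are random stand-ins for a hypothetical critic f_w, and the two returned terms are summed for the update step as the docstring describes.

# Illustrative usage sketch; batch size and random scores are example choices.
import torch

loss_fn = DiscriminatorLoss()
f_real = torch.randn(16)                      # f_w(x) on real samples
f_fake = torch.randn(16)                      # f_w(g(z)) on generated samples
loss_real, loss_fake = loss_fn(f_real, f_fake)
discriminator_loss = loss_real + loss_fake    # the two hinge terms are added for the update
print(loss_real.item(), loss_fake.item())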
ITN3D
import torch
import torch.nn.functional as F
import torch.nn as nn


class ITN3D(nn.Module):

    def __init__(self, input_channels):
        super(ITN3D, self).__init__()
        use_bias = True
        self.conv11 = nn.Conv3d(input_channels, 2, kernel_size=3, padding=1,
            bias=use_bias)
        self.conv12 = nn.Conv3d(2, 4, kernel_size=3, padding=1, bias=use_bias)
        self.down1 = nn.Conv3d(4, 8, kernel_size=2, stride=2, bias=use_bias)
        self.conv21 = nn.Conv3d(8, 8, kernel_size=3, padding=1, bias=use_bias)
        self.down2 = nn.Conv3d(8, 16, kernel_size=2, stride=2, bias=use_bias)
        self.conv31 = nn.Conv3d(16, 16, kernel_size=3, padding=1, bias=use_bias)
        self.up2 = nn.ConvTranspose3d(16, 8, kernel_size=2, stride=2, bias=use_bias)
        self.conv22 = nn.Conv3d(8, 8, kernel_size=3, padding=1, bias=use_bias)
        self.up1 = nn.ConvTranspose3d(8, 4, kernel_size=2, stride=2, bias=use_bias)
        self.conv13 = nn.Conv3d(4, 2, kernel_size=3, padding=1, bias=use_bias)
        self.conv14 = nn.Conv3d(2, 2, kernel_size=3, padding=1, bias=use_bias)
        self.conv15 = nn.Conv3d(2, input_channels, kernel_size=3, padding=1,
            bias=use_bias)

    def forward(self, x):
        x1 = F.relu(self.conv11(x))
        x1 = F.relu(self.conv12(x1))
        x2 = self.down1(x1)
        x2 = F.relu(self.conv21(x2))
        x3 = self.down2(x2)
        x3 = F.relu(self.conv31(x3))
        x2 = self.up2(x3) + x2
        x2 = F.relu(self.conv22(x2))
        x1 = self.up1(x2) + x1
        x1 = F.relu(self.conv13(x1))
        x1 = F.relu(self.conv14(x1))
        x = self.conv15(x1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_threshold_backward_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp3 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_threshold_backward_8(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp3 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (2, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2, 3, 3, 3), (54, 27, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4, 2, 2, 2), (32, 8, 4, 2, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (8, 8, 3, 3, 3), (216, 27, 9, 3, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (16, 8, 2, 2, 2), (64, 8, 4, 2, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (16, 16, 3, 3, 3), (432, 27, 9, 3, 1)) assert_size_stride(primals_13, (16,), (1,)) assert_size_stride(primals_14, (16, 8, 2, 2, 2), (64, 8, 4, 2, 1)) assert_size_stride(primals_15, (8,), (1,)) assert_size_stride(primals_16, (8, 8, 3, 3, 3), (216, 27, 9, 3, 1)) assert_size_stride(primals_17, (8,), (1,)) assert_size_stride(primals_18, (8, 4, 2, 2, 2), (32, 8, 4, 2, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (2, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_21, (2,), (1,)) assert_size_stride(primals_22, (2, 2, 3, 3, 3), (54, 27, 9, 3, 1)) assert_size_stride(primals_23, (2,), (1,)) assert_size_stride(primals_24, (4, 2, 3, 3, 3), (54, 
27, 9, 3, 1)) assert_size_stride(primals_25, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (2, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf30 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1, primals_2, buf30, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 2, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_relu_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_6, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (1, 8, 2, 2, 2), (0, 8, 4, 2, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf6, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1)) buf7 = reinterpret_tensor(buf6, (8, 2, 2, 2), (8, 4, 2, 1), 0) del buf6 triton_poi_fused_relu_3[grid(64)](buf7, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(reinterpret_tensor(buf7, (1, 8, 2, 2, 2), (0, 8, 4, 2, 1), 0), primals_10, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf8, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1)) buf9 = reinterpret_tensor(buf8, (1, 16, 1, 1, 1), (16, 1, 16, 16, 16), 0) del buf8 triton_poi_fused_convolution_4[grid(16)](buf9, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (1, 16, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_12, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1)) buf11 = reinterpret_tensor(buf10, (16, 1, 1, 1), (1, 16, 16, 16), 0) del buf10 buf27 = empty_strided_cuda((16, 1, 1, 1), (1, 1, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(16)](buf11, primals_13, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 16, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_14, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf12, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1)) buf13 = reinterpret_tensor(buf12, (8, 2, 2, 2), (8, 4, 2, 1), 0) del buf12 buf28 = 
empty_strided_cuda((8, 2, 2, 2), (8, 4, 2, 1), torch.bool) triton_poi_fused_add_threshold_backward_6[grid(64)](buf13, primals_15, buf7, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 8, 2, 2, 2), (0, 8, 4, 2, 1), 0), primals_16, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1)) buf15 = reinterpret_tensor(buf14, (8, 2, 2, 2), (8, 4, 2, 1), 0) del buf14 buf26 = empty_strided_cuda((8, 2, 2, 2), (8, 4, 2, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf15, primals_17, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_17 buf16 = extern_kernels.convolution(reinterpret_tensor(buf15, (1, 8, 2, 2, 2), (0, 8, 4, 2, 1), 0), primals_18, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf16, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf17 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf16 buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_threshold_backward_8[grid(256)](buf17, primals_19, buf3, buf29, 256, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_19 buf18 = extern_kernels.convolution(reinterpret_tensor(buf17, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_20, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf18, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1)) buf19 = reinterpret_tensor(buf18, (2, 4, 4, 4), (64, 16, 4, 1), 0) del buf18 buf25 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf19, primals_21, buf25, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf20 = extern_kernels.convolution(reinterpret_tensor(buf19, (1, 2, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_22, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf20, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1)) buf21 = reinterpret_tensor(buf20, (2, 4, 4, 4), (64, 16, 4, 1), 0) del buf20 buf24 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf21, primals_23, buf24, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_23 buf22 = extern_kernels.convolution(reinterpret_tensor(buf21, (1, 2, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_24, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf22, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_9[grid(256)](buf23, primals_25, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 return (reinterpret_tensor(buf23, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0), reinterpret_tensor(buf3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf5, reinterpret_tensor(buf7, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1), 0), reinterpret_tensor(buf9, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0), reinterpret_tensor(buf11, (1, 16, 
1, 1, 1), (16, 1, 1, 1, 1), 0), reinterpret_tensor(buf13, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1), 0), reinterpret_tensor(buf15, (1, 8, 2, 2, 2), (64, 8, 4, 2, 1), 0), reinterpret_tensor(buf17, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf19, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0), reinterpret_tensor(buf21, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0), buf24, buf25, buf26, buf27, buf28, buf29, buf30) class ITN3DNew(nn.Module): def __init__(self, input_channels): super(ITN3DNew, self).__init__() use_bias = True self.conv11 = nn.Conv3d(input_channels, 2, kernel_size=3, padding=1, bias=use_bias) self.conv12 = nn.Conv3d(2, 4, kernel_size=3, padding=1, bias=use_bias) self.down1 = nn.Conv3d(4, 8, kernel_size=2, stride=2, bias=use_bias) self.conv21 = nn.Conv3d(8, 8, kernel_size=3, padding=1, bias=use_bias) self.down2 = nn.Conv3d(8, 16, kernel_size=2, stride=2, bias=use_bias) self.conv31 = nn.Conv3d(16, 16, kernel_size=3, padding=1, bias=use_bias ) self.up2 = nn.ConvTranspose3d(16, 8, kernel_size=2, stride=2, bias= use_bias) self.conv22 = nn.Conv3d(8, 8, kernel_size=3, padding=1, bias=use_bias) self.up1 = nn.ConvTranspose3d(8, 4, kernel_size=2, stride=2, bias= use_bias) self.conv13 = nn.Conv3d(4, 2, kernel_size=3, padding=1, bias=use_bias) self.conv14 = nn.Conv3d(2, 2, kernel_size=3, padding=1, bias=use_bias) self.conv15 = nn.Conv3d(2, input_channels, kernel_size=3, padding=1, bias=use_bias) def forward(self, input_0): primals_1 = self.conv11.weight primals_2 = self.conv11.bias primals_4 = self.conv12.weight primals_5 = self.conv12.bias primals_6 = self.down1.weight primals_7 = self.down1.bias primals_8 = self.conv21.weight primals_9 = self.conv21.bias primals_10 = self.down2.weight primals_11 = self.down2.bias primals_12 = self.conv31.weight primals_13 = self.conv31.bias primals_14 = self.up2.weight primals_15 = self.up2.bias primals_16 = self.conv22.weight primals_17 = self.conv22.bias primals_18 = self.up1.weight primals_19 = self.up1.bias primals_20 = self.conv13.weight primals_21 = self.conv13.bias primals_22 = self.conv14.weight primals_23 = self.conv14.bias primals_24 = self.conv15.weight primals_25 = self.conv15.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0]
swaroopkml96/istn
ITN3D
false
16,546
[ "Apache-2.0" ]
91
600543e071aa56907509aa090697295cdc69a6b1
https://github.com/swaroopkml96/istn/tree/600543e071aa56907509aa090697295cdc69a6b1
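A minimal usage sketch; the (1, 4, 8, 8, 8) volume is an example choice with spatial sides divisible by 4, so the two stride-2 down/up stages return to the input resolution and the skip additions line up.

# Illustrative usage sketch; the volume size is an example choice.
import torch

itn = ITN3D(input_channels=4)
vol = torch.rand(1, 4, 8, 8, 8)    # [batch, channels, D, H, W]
out = itn(vol)                     # same shape as the input volume
print(out.shape)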
CrossEntropyBayesRisk
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd


class CrossEntropyBayesRisk(Module):
    """
    <a id="CrossEntropyBayesRisk"></a>

    ## Bayes Risk with Cross Entropy Loss

    Bayes risk is the overall maximum cost of making incorrect estimates.
    It takes a cost function that gives the cost of making an incorrect estimate
    and sums it over all possible outcomes based on probability distribution.

    Here the cost function is cross-entropy loss, for one-hot coded $\\mathbf{y}$

    $$\\sum_{k=1}^K -y_k \\log p_k$$

    We integrate this cost over all $\\mathbf{p}$

    \\begin{align}
    \\mathcal{L}(\\Theta) &= -\\log \\Bigg(
     \\int
      \\Big[ \\sum_{k=1}^K -y_k \\log p_k \\Big]
     \\frac{1}{B(\\textcolor{orange}{\\mathbf{\\alpha}})}
     \\prod_{k=1}^K p_k^{\\textcolor{orange}{\\alpha_k} - 1}
     d\\mathbf{p}
     \\Bigg )
    \\
    &= \\sum_{k=1}^K y_k \\bigg( \\psi(S) - \\psi( \\textcolor{orange}{\\alpha_k} ) \\bigg)
    \\end{align}

    where $\\psi(\\cdot)$ is the $digamma$ function.
    """

    def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'):
        """
        * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]`
        * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]`
        """
        alpha = evidence + 1.0
        strength = alpha.sum(dim=-1)
        loss = (target * (torch.digamma(strength)[:, None] - torch.digamma(
            alpha))).sum(dim=-1)
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_mean_mul_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + 4 * r3, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr2 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + 4 * r0 + 16 * r2), None, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr2 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3 + 4 * r0 + 16 * r2), None, eviction_policy ='evict_last') tmp19 = tl.load(in_ptr2 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp8 = tmp6 - tmp7 tmp9 = tmp5 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp12 - tmp13 tmp15 = tmp11 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp18 - tmp19 tmp21 = tmp17 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_sum_0[grid(64)](arg0_1, 
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = torch.ops.aten.digamma.default(buf0)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_1[grid(256)](arg0_1, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
        buf4 = torch.ops.aten.digamma.default(buf3)
        del buf3
        buf5 = buf4
        del buf4
        buf7 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf7
        del buf7
        triton_per_fused_mean_mul_sub_sum_2[grid(1)](buf8, arg1_1, buf2,
            buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf2
        del buf5
    return buf8,


class CrossEntropyBayesRiskNew(Module):
    """
    <a id="CrossEntropyBayesRisk"></a>

    ## Bayes Risk with Cross Entropy Loss

    Bayes risk is the overall maximum cost of making incorrect estimates.
    It takes a cost function that gives the cost of making an incorrect estimate
    and sums it over all possible outcomes based on probability distribution.

    Here the cost function is cross-entropy loss, for one-hot coded $\\mathbf{y}$
    $$\\sum_{k=1}^K -y_k \\log p_k$$

    We integrate this cost over all $\\mathbf{p}$

    \\begin{align}
    \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int
      \\Big[ \\sum_{k=1}^K -y_k \\log p_k \\Big]
      \\frac{1}{B(\\textcolor{orange}{\\mathbf{\\alpha}})}
      \\prod_{k=1}^K p_k^{\\textcolor{orange}{\\alpha_k} - 1}
      d\\mathbf{p} \\Bigg ) \\
    &= \\sum_{k=1}^K y_k \\bigg( \\psi(S) - \\psi( \\textcolor{orange}{\\alpha_k} ) \\bigg)
    \\end{align}

    where $\\psi(\\cdot)$ is the $digamma$ function.
    """

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
CrossEntropyBayesRisk
false
16,547
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
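The record above ships only the Triton-compiled `call` path for this loss, so the following is a minimal eager-mode sketch of the Bayes-risk formula from the docstring, written in plain PyTorch for reference. The function name and the random test tensors are illustrative only and are not part of the dataset record.

import torch

def cross_entropy_bayes_risk(evidence: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # alpha_k = e_k + 1 and S = sum_k alpha_k, as in the docstring above
    alpha = evidence + 1.0
    strength = alpha.sum(dim=-1, keepdim=True)
    # sum_k y_k * (psi(S) - psi(alpha_k)), averaged over the batch
    loss = (target * (torch.digamma(strength) - torch.digamma(alpha))).sum(dim=-1)
    return loss.mean()

evidence, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(cross_entropy_bayes_risk(evidence, target))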
GatedRNNCell
import torch from torch import nn from functools import partial def get_initializer(name, activation): if activation in ['id', 'identity', 'linear', 'modrelu']: nonlinearity = 'linear' elif activation in ['relu', 'tanh', 'sigmoid']: nonlinearity = activation else: assert False, f'get_initializer: activation {activation} not supported' if name == 'uniform': initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity= nonlinearity) elif name == 'normal': initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity= nonlinearity) elif name == 'xavier': initializer = torch.nn.init.xavier_normal_ elif name == 'zero': initializer = partial(torch.nn.init.constant_, val=0) elif name == 'one': initializer = partial(torch.nn.init.constant_, val=1) else: assert False, f'get_initializer: initializer type {name} not supported' return initializer def Linear_(input_size, output_size, bias, init='normal', zero_bias_init= False, **kwargs): """ Returns a nn.Linear module with initialization options """ l = nn.Linear(input_size, output_size, bias=bias, **kwargs) get_initializer(init, 'linear')(l.weight) if bias and zero_bias_init: nn.init.zeros_(l.bias) return l def get_activation(activation, size): if activation == 'id': return nn.Identity() elif activation == 'tanh': return torch.tanh elif activation == 'relu': return torch.relu elif activation == 'sigmoid': return torch.sigmoid elif activation == 'modrelu': return Modrelu(size) else: raise NotImplementedError("hidden activation '{}' is not implemented" .format(activation)) class Gate(nn.Module): """ Implements gating mechanisms. Mechanisms: N - No gate G - Standard sigmoid gate """ def __init__(self, size, preact_ctor, preact_args, mechanism='N'): super().__init__() self.size = size self.mechanism = mechanism if self.mechanism == 'N': pass elif self.mechanism == 'G': self.W_g = preact_ctor(*preact_args) else: assert False, f'Gating type {self.mechanism} is not supported.' def forward(self, *inputs): if self.mechanism == 'N': return 1.0 if self.mechanism == 'G': g_preact = self.W_g(*inputs) g = torch.sigmoid(g_preact) return g class modrelu(nn.Module): def __init__(self, features): super(modrelu, self).__init__() self.features = features self.b = nn.Parameter(torch.Tensor(self.features)) self.reset_parameters() def reset_parameters(self): self.b.data.uniform_(-0.01, 0.01) def forward(self, inputs): norm = torch.abs(inputs) biased_norm = norm + self.b magnitude = nn.functional.relu(biased_norm) phase = torch.sign(inputs) return phase * magnitude class Parametrization(nn.Module): """ Implements the parametrization of a manifold in terms of a Euclidean space It gives the parametrized matrix through the attribute `B` To use it, subclass it and implement the method `retraction` and the method `forward` (and optionally `project`). See the documentation in these methods for details You can find an example in the file `orthogonal.py` where we implement the Orthogonal class to optimize over the Stiefel manifold using an arbitrary retraction """ def __init__(self, A, base, mode): """ mode: "static" or a tuple such that: mode[0] == "dynamic" mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K. 
""" super(Parametrization, self).__init__() assert mode == 'static' or isinstance(mode, tuple) and len(mode ) == 3 and mode[0] == 'dynamic' self.A = nn.Parameter(A) self.register_buffer('_B', None) self.register_buffer('base', base) if mode == 'static': self.mode = mode else: self.mode = mode[0] self.K = mode[1] self.M = mode[2] self.k = 0 self.m = 0 def hook(grad): nonlocal self self._B = None self.A.register_hook(hook) def rebase(self): with torch.no_grad(): self.base.data.copy_(self._B.data) self.A.data.zero_() @property def B(self): not_B = self._B is None if not_B or not self._B.grad_fn and torch.is_grad_enabled(): self._B = self.retraction(self.A, self.base) self._B.requires_grad_() self._B.retain_grad() if self.mode == 'dynamic' and not_B: if self.k == 0: self.rebase() self.m = (self.m + 1) % self.M if self.m == 0 and hasattr(self, 'project'): with torch.no_grad(): self.base = self.project(self.base) if self.K != 'infty': self.k = (self.k + 1) % self.K elif self.k == 0: self.k = 1 return self._B def retraction(self, A, base): """ It computes r_{base}(A). Notice that A will not always be in the tangent space of our manifold For this reason, we first have to use A to parametrize the tangent space, and then compute the retraction When dealing with Lie groups, raw_A is always projected into the Lie algebra, as an optimization (cf. Section E in the paper) """ raise NotImplementedError def project(self, base): """ This method is OPTIONAL It returns the projected base back into the manifold """ raise NotImplementedError def forward(self, input): """ It uses the attribute self.B to implement the layer itself (e.g. Linear, CNN, ...) """ raise NotImplementedError class Orthogonal(Parametrization): """ Class that implements optimization restricted to the Stiefel manifold """ def __init__(self, input_size, output_size, initializer_skew, mode, param): """ mode: "static" or a tuple such that: mode[0] == "dynamic" mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K. param: A parametrization of in terms of skew-symmetyric matrices """ max_size = max(input_size, output_size) A = torch.empty(max_size, max_size) base = torch.empty(input_size, output_size) super(Orthogonal, self).__init__(A, base, mode) self.input_size = input_size self.output_size = output_size self.param = param self.init_A = initializer_skew self.init_base = nn.init.eye_ self.reset_parameters() def reset_parameters(self): self.init_A(self.A) self.init_base(self.base) def forward(self, input): return input.matmul(self.B) def retraction(self, A, base): A = A.triu(diagonal=1) A = A - A.t() B = base.mm(self.param(A)) if self.input_size != self.output_size: B = B[:self.input_size, :self.output_size] return B def project(self, base): try: U, _, V = torch.svd(base, some=True) return U.mm(V.t()) except RuntimeError: x = base if base.size(0) < base.size(1): x = base.t() ret = torch.qr(x, some=True).Q if base.size(0) < base.size(1): ret = ret.t() return ret class CellBase(nn.Module): """ Abstract class for our recurrent cell interface. 
Passes input through """ registry = {} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if hasattr(cls, 'name') and cls.name is not None: cls.registry[cls.name] = cls name = 'id' valid_keys = [] def default_initializers(self): return {} def default_architecture(self): return {} def __init__(self, input_size, hidden_size, initializers=None, architecture=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.architecture = self.default_architecture() self.initializers = self.default_initializers() if initializers is not None: self.initializers.update(initializers) None if architecture is not None: self.architecture.update(architecture) assert set(self.initializers.keys()).issubset(self.valid_keys) assert set(self.architecture.keys()).issubset(self.valid_keys) self.reset_parameters() def reset_parameters(self): pass def forward(self, input, hidden): return input, input def default_state(self, input, batch_size=None): return input.new_zeros(input.size(0) if batch_size is None else batch_size, self.hidden_size, requires_grad=False) def output(self, h): return h def state_size(self): return self.hidden_size def output_size(self): return self.hidden_size def initial_state(self, trainable=False): """ Return initial state of the RNN This should not need to see the input as it should be batch size agnostic and automatically broadcasted # TODO Currently not used """ if trainable: self.initial_state = torch.zeros(self.hidden_size, requires_grad=True) else: return torch.zeros(self.hidden_size, requires_grad=True) class Modrelu(modrelu): def reset_parameters(self): self.b.data.uniform_(-0.0, 0.0) class OrthogonalLinear(Orthogonal): def __init__(self, input_size, output_size, method='exprnn', init= 'cayley', K=100): """ Wrapper around expRNN's Orthogonal class taking care of parameter names """ if method == 'exprnn': mode = 'static' param = 'expm' elif method == 'dtriv': mode = 'dynamic', ortho_args['K'], 100 param = 'expm' elif method == 'cayley': mode = 'static' param = 'cayley' else: assert False, f'OrthogonalLinear: orthogonal method {method} not supported' param = param_name_to_param[param] init_A = init_name_to_init[init] super().__init__(input_size, output_size, init_A, mode, param) class RNNCell(CellBase): name = 'rnn' valid_keys = ['hx', 'hh', 'bias'] def default_initializers(self): return {'hx': 'xavier', 'hh': 'xavier'} def default_architecture(self): return {'bias': True} def __init__(self, input_size, hidden_size, hidden_activation='tanh', orthogonal=False, ortho_args=None, zero_bias_init=False, **kwargs): self.hidden_activation = hidden_activation self.orthogonal = orthogonal self.ortho_args = ortho_args self.zero_bias_init = zero_bias_init super().__init__(input_size, hidden_size, **kwargs) def reset_parameters(self): self.W_hx = Linear_(self.input_size, self.hidden_size, bias=self. 
architecture['bias'], zero_bias_init=self.zero_bias_init) get_initializer(self.initializers['hx'], self.hidden_activation)(self .W_hx.weight) self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size) self.reset_hidden_to_hidden() def reset_hidden_to_hidden(self): if self.orthogonal: if self.ortho_args is None: self.ortho_args = {} self.ortho_args['input_size'] = self.hidden_size self.ortho_args['output_size'] = self.hidden_size self.W_hh = OrthogonalLinear(**self.ortho_args) else: self.W_hh = nn.Linear(self.hidden_size, self.hidden_size, bias= self.architecture['bias']) get_initializer(self.initializers['hh'], self.hidden_activation)( self.W_hh.weight) def forward(self, input, h): hidden_preact = self.W_hx(input) + self.W_hh(h) hidden = self.hidden_activation_fn(hidden_preact) return hidden, hidden class GatedRNNCell(RNNCell): name = 'gru' def __init__(self, input_size, hidden_size, gate='G', reset='N', **kwargs): self.gate = gate self.reset = reset super().__init__(input_size, hidden_size, **kwargs) def reset_parameters(self): super().reset_parameters() preact_ctor = Linear_ preact_args = [self.input_size + self.hidden_size, self.hidden_size, self.architecture['bias']] self.W_g = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate) self.W_reset = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.reset) def forward(self, input, h): hx = torch.cat((input, h), dim=-1) reset = self.W_reset(hx) _, update = super().forward(input, reset * h) g = self.W_g(hx) h = (1.0 - g) * h + g * update return h, h def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from functools import partial assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x2, xmask) tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp9 = tl.sigmoid(tmp8) tmp10 = 1.0 tmp11 = tmp10 - tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp9 * tmp7 tmp15 = tmp13 + tmp14 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf4 
= empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](primals_1, primals_2, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_7 del primals_8 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(256)](buf3, primals_4, buf2, primals_6, buf5, primals_2, buf6, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf2 del primals_4 del primals_6 return buf6, primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf3, reinterpret_tensor(buf4, (64, 8), (8, 1), 0), buf5 def get_initializer(name, activation): if activation in ['id', 'identity', 'linear', 'modrelu']: nonlinearity = 'linear' elif activation in ['relu', 'tanh', 'sigmoid']: nonlinearity = activation else: assert False, f'get_initializer: activation {activation} not supported' if name == 'uniform': initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity= nonlinearity) elif name == 'normal': initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity= nonlinearity) elif name == 'xavier': initializer = torch.nn.init.xavier_normal_ elif name == 'zero': initializer = partial(torch.nn.init.constant_, val=0) elif name == 'one': initializer = partial(torch.nn.init.constant_, val=1) else: assert False, f'get_initializer: initializer type {name} not supported' return initializer def Linear_(input_size, output_size, bias, init='normal', zero_bias_init= False, **kwargs): """ Returns a nn.Linear module with initialization options """ l = nn.Linear(input_size, output_size, bias=bias, **kwargs) get_initializer(init, 'linear')(l.weight) if bias and zero_bias_init: nn.init.zeros_(l.bias) return l def get_activation(activation, size): if activation == 'id': return nn.Identity() elif activation == 'tanh': return torch.tanh elif activation == 'relu': return torch.relu elif activation == 'sigmoid': return torch.sigmoid elif activation == 'modrelu': return Modrelu(size) else: raise NotImplementedError("hidden activation '{}' is not implemented" .format(activation)) class Gate(nn.Module): """ Implements gating mechanisms. Mechanisms: N - No gate G - Standard sigmoid gate """ def __init__(self, size, preact_ctor, preact_args, mechanism='N'): super().__init__() self.size = size self.mechanism = mechanism if self.mechanism == 'N': pass elif self.mechanism == 'G': self.W_g = preact_ctor(*preact_args) else: assert False, f'Gating type {self.mechanism} is not supported.' 
def forward(self, *inputs): if self.mechanism == 'N': return 1.0 if self.mechanism == 'G': g_preact = self.W_g(*inputs) g = torch.sigmoid(g_preact) return g class modrelu(nn.Module): def __init__(self, features): super(modrelu, self).__init__() self.features = features self.b = nn.Parameter(torch.Tensor(self.features)) self.reset_parameters() def reset_parameters(self): self.b.data.uniform_(-0.01, 0.01) def forward(self, inputs): norm = torch.abs(inputs) biased_norm = norm + self.b magnitude = nn.functional.relu(biased_norm) phase = torch.sign(inputs) return phase * magnitude class Parametrization(nn.Module): """ Implements the parametrization of a manifold in terms of a Euclidean space It gives the parametrized matrix through the attribute `B` To use it, subclass it and implement the method `retraction` and the method `forward` (and optionally `project`). See the documentation in these methods for details You can find an example in the file `orthogonal.py` where we implement the Orthogonal class to optimize over the Stiefel manifold using an arbitrary retraction """ def __init__(self, A, base, mode): """ mode: "static" or a tuple such that: mode[0] == "dynamic" mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K. """ super(Parametrization, self).__init__() assert mode == 'static' or isinstance(mode, tuple) and len(mode ) == 3 and mode[0] == 'dynamic' self.A = nn.Parameter(A) self.register_buffer('_B', None) self.register_buffer('base', base) if mode == 'static': self.mode = mode else: self.mode = mode[0] self.K = mode[1] self.M = mode[2] self.k = 0 self.m = 0 def hook(grad): nonlocal self self._B = None self.A.register_hook(hook) def rebase(self): with torch.no_grad(): self.base.data.copy_(self._B.data) self.A.data.zero_() @property def B(self): not_B = self._B is None if not_B or not self._B.grad_fn and torch.is_grad_enabled(): self._B = self.retraction(self.A, self.base) self._B.requires_grad_() self._B.retain_grad() if self.mode == 'dynamic' and not_B: if self.k == 0: self.rebase() self.m = (self.m + 1) % self.M if self.m == 0 and hasattr(self, 'project'): with torch.no_grad(): self.base = self.project(self.base) if self.K != 'infty': self.k = (self.k + 1) % self.K elif self.k == 0: self.k = 1 return self._B def retraction(self, A, base): """ It computes r_{base}(A). Notice that A will not always be in the tangent space of our manifold For this reason, we first have to use A to parametrize the tangent space, and then compute the retraction When dealing with Lie groups, raw_A is always projected into the Lie algebra, as an optimization (cf. Section E in the paper) """ raise NotImplementedError def project(self, base): """ This method is OPTIONAL It returns the projected base back into the manifold """ raise NotImplementedError def forward(self, input): """ It uses the attribute self.B to implement the layer itself (e.g. Linear, CNN, ...) 
""" raise NotImplementedError class Orthogonal(Parametrization): """ Class that implements optimization restricted to the Stiefel manifold """ def __init__(self, input_size, output_size, initializer_skew, mode, param): """ mode: "static" or a tuple such that: mode[0] == "dynamic" mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K. param: A parametrization of in terms of skew-symmetyric matrices """ max_size = max(input_size, output_size) A = torch.empty(max_size, max_size) base = torch.empty(input_size, output_size) super(Orthogonal, self).__init__(A, base, mode) self.input_size = input_size self.output_size = output_size self.param = param self.init_A = initializer_skew self.init_base = nn.init.eye_ self.reset_parameters() def reset_parameters(self): self.init_A(self.A) self.init_base(self.base) def forward(self, input): return input.matmul(self.B) def retraction(self, A, base): A = A.triu(diagonal=1) A = A - A.t() B = base.mm(self.param(A)) if self.input_size != self.output_size: B = B[:self.input_size, :self.output_size] return B def project(self, base): try: U, _, V = torch.svd(base, some=True) return U.mm(V.t()) except RuntimeError: x = base if base.size(0) < base.size(1): x = base.t() ret = torch.qr(x, some=True).Q if base.size(0) < base.size(1): ret = ret.t() return ret class CellBase(nn.Module): """ Abstract class for our recurrent cell interface. Passes input through """ registry = {} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if hasattr(cls, 'name') and cls.name is not None: cls.registry[cls.name] = cls name = 'id' valid_keys = [] def default_initializers(self): return {} def default_architecture(self): return {} def __init__(self, input_size, hidden_size, initializers=None, architecture=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.architecture = self.default_architecture() self.initializers = self.default_initializers() if initializers is not None: self.initializers.update(initializers) None if architecture is not None: self.architecture.update(architecture) assert set(self.initializers.keys()).issubset(self.valid_keys) assert set(self.architecture.keys()).issubset(self.valid_keys) self.reset_parameters() def reset_parameters(self): pass def forward(self, input, hidden): return input, input def default_state(self, input, batch_size=None): return input.new_zeros(input.size(0) if batch_size is None else batch_size, self.hidden_size, requires_grad=False) def output(self, h): return h def state_size(self): return self.hidden_size def output_size(self): return self.hidden_size def initial_state(self, trainable=False): """ Return initial state of the RNN This should not need to see the input as it should be batch size agnostic and automatically broadcasted # TODO Currently not used """ if trainable: self.initial_state = torch.zeros(self.hidden_size, requires_grad=True) else: return torch.zeros(self.hidden_size, requires_grad=True) class Modrelu(modrelu): def reset_parameters(self): self.b.data.uniform_(-0.0, 0.0) class OrthogonalLinear(Orthogonal): def __init__(self, input_size, output_size, method='exprnn', init= 'cayley', K=100): """ Wrapper around expRNN's Orthogonal class taking care of parameter names """ if method == 'exprnn': mode = 'static' param = 'expm' elif method == 'dtriv': mode = 'dynamic', 
ortho_args['K'], 100 param = 'expm' elif method == 'cayley': mode = 'static' param = 'cayley' else: assert False, f'OrthogonalLinear: orthogonal method {method} not supported' param = param_name_to_param[param] init_A = init_name_to_init[init] super().__init__(input_size, output_size, init_A, mode, param) class RNNCell(CellBase): name = 'rnn' valid_keys = ['hx', 'hh', 'bias'] def default_initializers(self): return {'hx': 'xavier', 'hh': 'xavier'} def default_architecture(self): return {'bias': True} def __init__(self, input_size, hidden_size, hidden_activation='tanh', orthogonal=False, ortho_args=None, zero_bias_init=False, **kwargs): self.hidden_activation = hidden_activation self.orthogonal = orthogonal self.ortho_args = ortho_args self.zero_bias_init = zero_bias_init super().__init__(input_size, hidden_size, **kwargs) def reset_parameters(self): self.W_hx = Linear_(self.input_size, self.hidden_size, bias=self. architecture['bias'], zero_bias_init=self.zero_bias_init) get_initializer(self.initializers['hx'], self.hidden_activation)(self .W_hx.weight) self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size) self.reset_hidden_to_hidden() def reset_hidden_to_hidden(self): if self.orthogonal: if self.ortho_args is None: self.ortho_args = {} self.ortho_args['input_size'] = self.hidden_size self.ortho_args['output_size'] = self.hidden_size self.W_hh = OrthogonalLinear(**self.ortho_args) else: self.W_hh = nn.Linear(self.hidden_size, self.hidden_size, bias= self.architecture['bias']) get_initializer(self.initializers['hh'], self.hidden_activation)( self.W_hh.weight) def forward(self, input, h): hidden_preact = self.W_hx(input) + self.W_hh(h) hidden = self.hidden_activation_fn(hidden_preact) return hidden, hidden class GatedRNNCellNew(RNNCell): name = 'gru' def __init__(self, input_size, hidden_size, gate='G', reset='N', **kwargs): self.gate = gate self.reset = reset super().__init__(input_size, hidden_size, **kwargs) def reset_parameters(self): super().reset_parameters() preact_ctor = Linear_ preact_args = [self.input_size + self.hidden_size, self.hidden_size, self.architecture['bias']] self.W_g = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate) self.W_reset = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.reset) def forward(self, input_0, input_1): primals_3 = self.W_hx.weight primals_4 = self.W_hx.bias primals_5 = self.W_hh.weight primals_6 = self.W_hh.bias primals_7 = self.W_g.W_g.weight primals_8 = self.W_g.W_g.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
tarepan/HiPPO
GatedRNNCell
false
16,548
[ "Apache-2.0" ]
57
bc23e2dba13da6c307cb5a4ae248c2d2c56d465f
https://github.com/tarepan/HiPPO/tree/bc23e2dba13da6c307cb5a4ae248c2d2c56d465f
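A brief usage sketch for the record above, assuming the `GatedRNNCell` class from the original code cell is in scope; shapes follow its `get_inputs()`/`get_init_inputs()` helpers and the tensors are illustrative only.

import torch

cell = GatedRNNCell(input_size=4, hidden_size=4)  # gate='G', reset='N' by default
x = torch.rand(4, 4, 4, 4)   # input for one step
h = torch.rand(4, 4, 4, 4)   # previous hidden state
h_new, out = cell(x, h)      # the cell returns the new hidden state twice
print(h_new.shape)           # torch.Size([4, 4, 4, 4])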
MaximumLikelihoodLoss
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd


class MaximumLikelihoodLoss(Module):
    """
    <a id="MaximumLikelihoodLoss"></a>

    ## Type II Maximum Likelihood Loss

    The distribution $D(\\mathbf{p} \\vert \\textcolor{orange}{\\mathbf{\\alpha}})$ is a prior on
    the likelihood $Multi(\\mathbf{y} \\vert p)$,
    and the negative log marginal likelihood is calculated by integrating over class
    probabilities $\\mathbf{p}$.

    If target probabilities (one-hot targets) are $y_k$ for a given sample the loss is,

    \\begin{align}
    \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int
      \\prod_{k=1}^K p_k^{y_k}
      \\frac{1}{B(\\textcolor{orange}{\\mathbf{\\alpha}})}
      \\prod_{k=1}^K p_k^{\\textcolor{orange}{\\alpha_k} - 1}
      d\\mathbf{p} \\Bigg ) \\
    &= \\sum_{k=1}^K y_k \\bigg( \\log S - \\log \\textcolor{orange}{\\alpha_k} \\bigg)
    \\end{align}
    """

    def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'):
        """
        * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]`
        * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]`
        """
        alpha = evidence + 1.0
        strength = alpha.sum(dim=-1)
        loss = (target * (strength.log()[:, None] - alpha.log())).sum(dim=-1)
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (16 * r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (1 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * r3, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (4 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (5 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (6 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (7 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (8 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr1 + (9 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr1 + (10 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr1 + (11 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr1 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp57 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr1 + (12 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr1 + (13 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp63 = tl.load(in_ptr1 + (14 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr1 + (15 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr1 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp5 = tmp4 + tmp2 tmp6 = tmp3 + tmp5 tmp8 = tmp7 + tmp2 tmp9 = tmp6 + tmp8 tmp11 = tmp10 + tmp2 tmp12 = tmp9 + tmp11 tmp13 = tl_math.log(tmp12) tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = tmp0 * tmp17 tmp21 = tmp20 + tmp2 tmp23 = tmp22 + tmp2 tmp24 = tmp21 + tmp23 tmp26 = tmp25 + tmp2 tmp27 = tmp24 + tmp26 tmp29 = tmp28 + tmp2 tmp30 = tmp27 + tmp29 tmp31 = tl_math.log(tmp30) tmp33 = tmp32 + tmp2 tmp34 = tl_math.log(tmp33) tmp35 = tmp31 - tmp34 tmp36 = tmp19 * tmp35 tmp37 = tmp18 + tmp36 tmp40 = tmp39 + tmp2 tmp42 = tmp41 + tmp2 tmp43 = tmp40 + tmp42 tmp45 = tmp44 + tmp2 tmp46 = tmp43 + tmp45 tmp48 = tmp47 + tmp2 
tmp49 = tmp46 + tmp48 tmp50 = tl_math.log(tmp49) tmp52 = tmp51 + tmp2 tmp53 = tl_math.log(tmp52) tmp54 = tmp50 - tmp53 tmp55 = tmp38 * tmp54 tmp56 = tmp37 + tmp55 tmp59 = tmp58 + tmp2 tmp61 = tmp60 + tmp2 tmp62 = tmp59 + tmp61 tmp64 = tmp63 + tmp2 tmp65 = tmp62 + tmp64 tmp67 = tmp66 + tmp2 tmp68 = tmp65 + tmp67 tmp69 = tl_math.log(tmp68) tmp71 = tmp70 + tmp2 tmp72 = tl_math.log(tmp71) tmp73 = tmp69 - tmp72 tmp74 = tmp57 * tmp73 tmp75 = tmp56 + tmp74 tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK]) tmp78 = tl.sum(tmp76, 1)[:, None] tmp79 = 64.0 tmp80 = tmp78 / tmp79 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp80, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_log_mean_mul_sub_sum_0[grid(1)](buf2, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class MaximumLikelihoodLossNew(Module): """ <a id="MaximumLikelihoodLoss"></a> ## Type II Maximum Likelihood Loss The distribution $D(\\mathbf{p} ert extcolor{orange}{\\mathbf{lpha}})$ is a prior on the likelihood $Multi(\\mathbf{y} ert p)$, and the negative log marginal likelihood is calculated by integrating over class probabilities $\\mathbf{p}$. If target probabilities (one-hot targets) are $y_k$ for a given sample the loss is, egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\prod_{k=1}^K p_k^{y_k} rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K y_k igg( \\log S - \\log extcolor{orange}{lpha_k} igg) \\end{align} """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
MaximumLikelihoodLoss
false
16,549
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
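A small sanity-check sketch for the pair above. It assumes both classes from the two code cells are importable in one namespace and that a CUDA device is available, since the compiled `call` path is CUDA-only; it simply compares the eager loss against the Triton-compiled one on the `get_inputs()` shapes.

import torch

evidence = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
eager = MaximumLikelihoodLoss()(evidence, target)
fused = MaximumLikelihoodLossNew()(evidence, target)
print(torch.allclose(eager, fused, atol=1e-5))  # expected to print True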
EqualizedWeight
import math import torch import numpy as np from torch import nn import torch.utils.data from typing import List import torch.nn.functional import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c def get_inputs(): return [] def get_init_inputs(): return [[], {'shape': [4, 4]}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np from torch import nn import torch.utils.data from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 return buf0, class EqualizedWeightNew(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): primals_1 = self.weight output = call([primals_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
EqualizedWeight
false
16,550
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
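To show how the parameter above is typically consumed, here is a minimal, self-contained sketch of a learning-rate equalized linear layer in the spirit of the docstring. The class name `EqualizedLinearSketch` is made up for illustration and is not part of the record.

import math
import torch
from torch import nn

class EqualizedLinearSketch(nn.Module):
    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        # weights are stored as N(0, 1) and scaled by c = 1 / sqrt(fan_in) at use time
        self.c = 1 / math.sqrt(in_features)
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return nn.functional.linear(x, self.weight * self.c, self.bias)

print(EqualizedLinearSketch(4, 4)(torch.rand(8, 4)).shape)  # torch.Size([8, 4])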
MarginLoss
from torch.nn import Module import torch import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class MarginLoss(Module): '\n ## Margin loss for class existence\n\n A separate margin loss is used for each output capsule and the total loss is the sum of them.\n The length of each output capsule is the probability that class is present in the input.\n\n Loss for each output capsule or class $k$ is,\n $$\\mathcal{L}_k = T_k \\max(0, m^{+} - \\lVert\\mathbf{v}_k\rVert)^2 +\n \\lambda (1 - T_k) \\max(0, \\lVert\\mathbf{v}_k\rVert - m^{-})^2$$\n\n $T_k$ is $1$ if the class $k$ is present and $0$ otherwise.\n The first component of the loss is $0$ when the class is not present,\n and the second component is $0$ if the class is present.\n The $\\max(0, x)$ is used to avoid predictions going to extremes.\n $m^{+}$ is set to be $0.9$ and $m^{-}$ to be $0.1$ in the paper.\n\n The $\\lambda$ down-weighting is used to stop the length of all capsules from\n falling during the initial phase of training.\n ' def __init__(self, *, n_labels: int, lambda_: float=0.5, m_positive: float=0.9, m_negative: float=0.1): super().__init__() self.m_negative = m_negative self.m_positive = m_positive self.lambda_ = lambda_ self.n_labels = n_labels def forward(self, v: 'torch.Tensor', labels: 'torch.Tensor'): """ `v`, $\\mathbf{v}_j$ are the squashed output capsules. This has shape `[batch_size, n_labels, n_features]`; that is, there is a capsule for each label. `labels` are the labels, and has shape `[batch_size]`. """ v_norm = torch.sqrt((v ** 2).sum(dim=-1)) labels = torch.eye(self.n_labels, device=labels.device)[labels] loss = labels * F.relu(self.m_positive - v_norm) + self.lambda_ * ( 1.0 - labels) * F.relu(v_norm - self.m_negative) return loss.sum(dim=-1).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'n_labels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tmp4 tmp7 = x0 tmp8 = tmp6 == tmp7 tmp9 = 1.0 tmp10 = 0.0 tmp11 = tl.where(tmp8, tmp9, tmp10) tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 0.9 tmp25 = tmp24 - tmp23 tmp26 = tl.full([1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = tmp11 * tmp27 tmp29 = tmp9 - tmp11 tmp30 = 0.5 tmp31 = tmp29 * tmp30 tmp32 = 0.1 tmp33 = tmp23 - tmp32 tmp34 = triton_helpers.maximum(tmp26, tmp33) tmp35 = tmp31 * tmp34 tmp36 = tmp28 + tmp35 tl.store(out_ptr0 + x3, tmp36, xmask) @triton.jit def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 16.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0[grid (64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_mean_sum_1[grid(1)](buf2, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return 
buf2, class MarginLossNew(Module): '\n ## Margin loss for class existence\n\n A separate margin loss is used for each output capsule and the total loss is the sum of them.\n The length of each output capsule is the probability that class is present in the input.\n\n Loss for each output capsule or class $k$ is,\n $$\\mathcal{L}_k = T_k \\max(0, m^{+} - \\lVert\\mathbf{v}_k\rVert)^2 +\n \\lambda (1 - T_k) \\max(0, \\lVert\\mathbf{v}_k\rVert - m^{-})^2$$\n\n $T_k$ is $1$ if the class $k$ is present and $0$ otherwise.\n The first component of the loss is $0$ when the class is not present,\n and the second component is $0$ if the class is present.\n The $\\max(0, x)$ is used to avoid predictions going to extremes.\n $m^{+}$ is set to be $0.9$ and $m^{-}$ to be $0.1$ in the paper.\n\n The $\\lambda$ down-weighting is used to stop the length of all capsules from\n falling during the initial phase of training.\n ' def __init__(self, *, n_labels: int, lambda_: float=0.5, m_positive: float=0.9, m_negative: float=0.1): super().__init__() self.m_negative = m_negative self.m_positive = m_positive self.lambda_ = lambda_ self.n_labels = n_labels def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
MarginLoss
false
16,551
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
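A short usage sketch for the record above, assuming the `MarginLoss` class from the original code cell is in scope; the tensors follow its `get_inputs()` shapes and are illustrative only.

import torch

loss_fn = MarginLoss(n_labels=4)
v = torch.rand(4, 4, 4, 4)                # squashed capsule outputs
labels = torch.ones(4, dtype=torch.long)  # one class index per sample
print(loss_fn(v, labels))                 # scalar margin loss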
Conv1dCompression
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class Conv1dCompression(Module): """ ## 1D Convolution Compression $f_c$ This is a simple wrapper around [`nn.Conv1d`](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html) with some tensor dimension permutations. """ def __init__(self, compression_rate: 'int', d_model: 'int'): """ * `compression_rate` $c$ * `d_model` is the embedding size """ super().__init__() self.conv = nn.Conv1d(d_model, d_model, kernel_size= compression_rate, stride=compression_rate) def forward(self, mem: 'torch.Tensor'): """ `mem` has shape `[seq_len, batch, d_model]` """ mem = mem.permute(1, 2, 0) c_mem = self.conv(mem) return c_mem.permute(2, 0, 1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'compression_rate': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (1, 4, 4), (1, 4, 1), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0) class Conv1dCompressionNew(Module): """ ## 1D Convolution Compression $f_c$ This is a simple wrapper around [`nn.Conv1d`](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html) with some tensor dimension permutations. """ def __init__(self, compression_rate: 'int', d_model: 'int'): """ * `compression_rate` $c$ * `d_model` is the embedding size """ super().__init__() self.conv = nn.Conv1d(d_model, d_model, kernel_size= compression_rate, stride=compression_rate) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
Conv1dCompression
false
16,552
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
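A usage sketch for the record above, assuming `Conv1dCompression` from the original code cell is in scope: with compression rate $c = 4$, a memory of 4 steps is compressed down to a single step.

import torch

compress = Conv1dCompression(compression_rate=4, d_model=4)
mem = torch.rand(4, 4, 4)      # [seq_len, batch, d_model]
c_mem = compress(mem)          # conv1d over the sequence dimension
print(c_mem.shape)             # torch.Size([1, 4, 4])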
MLP
import torch def choose_nonlinearity(name): nl = None if name == 'tanh': nl = torch.tanh elif name == 'relu': nl = torch.relu elif name == 'sigmoid': nl = torch.sigmoid elif name == 'softplus': nl = torch.nn.functional.softplus elif name == 'selu': nl = torch.nn.functional.selu elif name == 'elu': nl = torch.nn.functional.elu elif name == 'swish': def nl(x): return x * torch.sigmoid(x) else: raise ValueError('nonlinearity not recognized') return nl class MLP(torch.nn.Module): """Just a salt-of-the-earth MLP""" def __init__(self, input_dim, hidden_dim, output_dim, nonlinearity='tanh'): super(MLP, self).__init__() self.linear1 = torch.nn.Linear(input_dim, hidden_dim) self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear3 = torch.nn.Linear(hidden_dim, output_dim, bias=None) for l in [self.linear1, self.linear2, self.linear3]: torch.nn.init.orthogonal_(l.weight) self.nonlinearity = choose_nonlinearity(nonlinearity) def forward(self, x, separate_fields=False): h = self.nonlinearity(self.linear1(x)) h = self.nonlinearity(self.linear2(h)) return self.linear3(h) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 def choose_nonlinearity(name): nl = None if name == 'tanh': nl = torch.tanh elif name == 'relu': nl = torch.relu elif name == 'sigmoid': nl = torch.sigmoid elif name == 'softplus': nl = torch.nn.functional.softplus elif name == 'selu': nl = torch.nn.functional.selu elif name == 'elu': nl = torch.nn.functional.elu elif name == 'swish': def nl(x): return x * torch.sigmoid(x) else: raise ValueError('nonlinearity not recognized') return nl class MLPNew(torch.nn.Module): """Just a salt-of-the-earth MLP""" def __init__(self, input_dim, hidden_dim, output_dim, nonlinearity='tanh'): super(MLPNew, self).__init__() self.linear1 = torch.nn.Linear(input_dim, hidden_dim) self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear3 = torch.nn.Linear(hidden_dim, output_dim, bias=None) for l in [self.linear1, self.linear2, self.linear3]: torch.nn.init.orthogonal_(l.weight) self.nonlinearity = choose_nonlinearity(nonlinearity) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias 
primals_6 = self.linear3.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
tailintalent/hamiltonian-nn
MLP
false
16,553
[ "Apache-2.0" ]
293
1f6dd2d58ab84977a30584f0d1dd7f8b234e4049
https://github.com/tailintalent/hamiltonian-nn/tree/1f6dd2d58ab84977a30584f0d1dd7f8b234e4049
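A usage sketch for the record above, assuming the `MLP` class from the original code cell is in scope; only the last dimension is transformed, so the output keeps the input's leading shape.

import torch

net = MLP(input_dim=4, hidden_dim=4, output_dim=4, nonlinearity='tanh')
x = torch.rand(4, 4, 4, 4)
print(net(x).shape)            # torch.Size([4, 4, 4, 4])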
ChannelNorm
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class ChannelNorm(Module): """ ## Channel Normalization This is similar to [Group Normalization](../group_norm/index.html) but affine transform is done group wise. """ def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool' =True): """ * `groups` is the number of groups the features are divided into * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() self.channels = channels self.groups = groups self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(groups)) self.shift = nn.Parameter(torch.zeros(groups)) def forward(self, x: 'torch.Tensor'): """ `x` is a tensor of shape `[batch_size, channels, *]`. `*` denotes any number of (possibly 0) dimensions. For example, in an image (2D) convolution this will be `[batch_size, channels, height, width]` """ x_shape = x.shape batch_size = x_shape[0] assert self.channels == x.shape[1] x = x.view(batch_size, self.groups, -1) mean = x.mean(dim=[-1], keepdim=True) mean_x2 = (x ** 2).mean(dim=[-1], keepdim=True) var = mean_x2 - mean ** 2 x_norm = (x - mean) / torch.sqrt(var + self.eps) if self.affine: x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1, -1, 1) return x_norm.view(x_shape) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + 0) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp23 = tl.load(in_ptr2 + 0) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp20 = tmp0 - tmp11 tmp21 = tmp20 / tmp17 tmp22 = tmp19 * tmp21 tmp25 = tmp22 + tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp25, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 1, 64), (64, 64, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, buf1, buf3 class ChannelNormNew(Module): """ ## Channel Normalization This is similar to [Group Normalization](../group_norm/index.html) but affine transform is done group wise. 
""" def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool' =True): """ * `groups` is the number of groups the features are divided into * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() self.channels = channels self.groups = groups self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(groups)) self.shift = nn.Parameter(torch.zeros(groups)) def forward(self, input_0): primals_2 = self.scale primals_3 = self.shift primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
ChannelNorm
false
16,554
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
KLDivLoss
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class KLDivLoss(Module): """ ## KL-Divergence loss This calculates the KL divergence between a given normal distribution and $\\mathcal{N}(0, 1)$ """ def forward(self, sigma_hat: 'torch.Tensor', mu: 'torch.Tensor'): return -0.5 * torch.mean(1 + sigma_hat - mu ** 2 - torch.exp(sigma_hat) ) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tmp13 = -0.5 tmp14 = tmp12 * tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_exp_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class KLDivLossNew(Module): """ ## KL-Divergence loss This calculates the KL divergence between a given normal distribution and $\\mathcal{N}(0, 1)$ """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
KLDivLoss
false
16,555
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
BinaryClassificationHead
from _paritybench_helpers import _mock_config import torch class BinaryClassificationHead(torch.nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.out_proj = torch.nn.Linear(config.hidden_size, 1) def init_weights(self): self.dense.weight.data.normal_(mean=0.0, std=self.config. initializer_range) if self.dense.bias is not None: self.dense.bias.data.zero_() def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 return reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class BinaryClassificationHeadNew(torch.nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.out_proj = torch.nn.Linear(config.hidden_size, 1) def init_weights(self): self.dense.weight.data.normal_(mean=0.0, std=self.config. initializer_range) if self.dense.bias is not None: self.dense.bias.data.zero_() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
techthiyanes/DeepPavlov
BinaryClassificationHead
false
16,556
[ "Apache-2.0" ]
5,893
08555428388fed3c7b036c0a82a70a25efcabcff
https://github.com/techthiyanes/DeepPavlov/tree/08555428388fed3c7b036c0a82a70a25efcabcff
MiniBatchStdDev
import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class MiniBatchStdDev(nn.Module): """ <a id="mini_batch_std_dev"></a> ### Mini-batch Standard Deviation Mini-batch standard deviation calculates the standard deviation across a mini-batch (or a subgroups within the mini-batch) for each feature in the feature map. Then it takes the mean of all the standard deviations and appends it to the feature map as one extra feature. """ def __init__(self, group_size: 'int'=4): """ * `group_size` is the number of samples to calculate standard deviation across. """ super().__init__() self.group_size = group_size def forward(self, x: 'torch.Tensor'): """ * `x` is the feature map """ assert x.shape[0] % self.group_size == 0 grouped = x.view(self.group_size, -1) std = torch.sqrt(grouped.var(dim=0) + 1e-08) std = std.mean().view(1, 1, 1, 1) b, _, h, w = x.shape std = std.expand(b, -1, h, w) return torch.cat([x, std], dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_cat_mean_sqrt_var_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tmp22 = 1e-08 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp29, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_cat_mean_sqrt_var_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class MiniBatchStdDevNew(nn.Module): """ <a id="mini_batch_std_dev"></a> ### Mini-batch Standard Deviation Mini-batch standard deviation calculates the standard deviation across a mini-batch (or a subgroups within the mini-batch) for each feature in the feature map. Then it takes the mean of all the standard deviations and appends it to the feature map as one extra feature. """ def __init__(self, group_size: 'int'=4): """ * `group_size` is the number of samples to calculate standard deviation across. """ super().__init__() self.group_size = group_size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
MiniBatchStdDev
false
16,557
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
GroupNorm
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class GroupNorm(Module): """ ## Group Normalization Layer """ def __init__(self, groups: 'int', channels: 'int', *, eps: float=1e-05, affine: bool=True): """ * `groups` is the number of groups the features are divided into * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() assert channels % groups == 0, 'Number of channels should be evenly divisible by the number of groups' self.groups = groups self.channels = channels self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(channels)) self.shift = nn.Parameter(torch.zeros(channels)) def forward(self, x: 'torch.Tensor'): """ `x` is a tensor of shape `[batch_size, channels, *]`. `*` denotes any number of (possibly 0) dimensions. For example, in an image (2D) convolution this will be `[batch_size, channels, height, width]` """ x_shape = x.shape batch_size = x_shape[0] assert self.channels == x.shape[1] x = x.view(batch_size, self.groups, -1) mean = x.mean(dim=[-1], keepdim=True) mean_x2 = (x ** 2).mean(dim=[-1], keepdim=True) var = mean_x2 - mean ** 2 x_norm = (x - mean) / torch.sqrt(var + self.eps) if self.affine: x_norm = x_norm.view(batch_size, self.channels, -1) x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1, -1, 1) return x_norm.view(x_shape) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'groups': 1, 'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp19 = tmp0 - tmp11 tmp20 = tmp19 / tmp17 tmp21 = tmp18 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, buf1, buf3 class GroupNormNew(Module): """ ## Group Normalization Layer """ def __init__(self, groups: 'int', channels: 'int', *, eps: float=1e-05, affine: bool=True): """ * `groups` is the number of groups the features are divided into * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() assert channels % groups == 0, 'Number of channels should be evenly divisible by the number of groups' self.groups = groups self.channels = channels self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(channels)) self.shift = nn.Parameter(torch.zeros(channels)) def forward(self, input_0): primals_2 = self.scale primals_3 = self.shift 
primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
GroupNorm
false
16,558
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
SquaredReLU
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SquaredReLU(Module): """ ## Squared ReLU activation $$y = {\\max(x, 0)}^2$$ Squared ReLU is used as the activation function in the [position wise feedforward module](../feed_forward.html). """ def __init__(self): super().__init__() self.relu = nn.ReLU() def forward(self, x: 'torch.Tensor'): x = self.relu(x) return x * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SquaredReLUNew(Module): """ ## Squared ReLU activation $$y = {\\max(x, 0)}^2$$ Squared ReLU is used as the activation function in the [position wise feedforward module](../feed_forward.html). """ def __init__(self): super().__init__() self.relu = nn.ReLU() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SquaredReLU
false
16,559
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
LSTMCell
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class LSTMCell(Module): """ ## Long Short-Term Memory Cell LSTM Cell computes $c$, and $h$. $c$ is like the long-term memory, and $h$ is like the short term memory. We use the input $x$ and $h$ to update the long term memory. In the update, some features of $c$ are cleared with a forget gate $f$, and some features $i$ are added through a gate $g$. The new short term memory is the $\\tanh$ of the long-term memory multiplied by the output gate $o$. Note that the cell doesn't look at long term memory $c$ when doing the update. It only modifies it. Also $c$ never goes through a linear transformation. This is what solves vanishing and exploding gradients. Here's the update rule. \\begin{align} c_t &= \\sigma(f_t) \\odot c_{t-1} + \\sigma(i_t) \\odot \\tanh(g_t) \\\\ h_t &= \\sigma(o_t) \\odot \\tanh(c_t) \\end{align} $\\odot$ stands for element-wise multiplication. Intermediate values and gates are computed as linear transformations of the hidden state and input. \\begin{align} i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\\\ f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\\\ g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\\\ o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1}) \\end{align} """ def __init__(self, input_size: 'int', hidden_size: 'int', layer_norm: 'bool'=False): super().__init__() self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size) self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False) if layer_norm: self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(4)]) self.layer_norm_c = nn.LayerNorm(hidden_size) else: self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)]) self.layer_norm_c = nn.Identity() def forward(self, x: 'torch.Tensor', h: 'torch.Tensor', c: 'torch.Tensor'): ifgo = self.hidden_lin(h) + self.input_lin(x) ifgo = ifgo.chunk(4, dim=-1) ifgo = [self.layer_norm[i](ifgo[i]) for i in range(4)] i, f, g, o = ifgo c_next = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g) h_next = torch.sigmoid(o) * torch.tanh(self.layer_norm_c(c_next)) return h_next, c_next def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp18 = tl.load(in_ptr3 + x2, xmask) tmp25 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp26 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp19 = tmp17 * tmp18 tmp20 = tmp5 * tmp11 tmp21 = tmp19 + tmp20 tmp22 = 1.0 tmp23 = tmp22 - tmp17 tmp24 = tmp17 * tmp23 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = libdevice.tanh(tmp21) tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr2 + x2, tmp21, xmask) tl.store(out_ptr3 + x2, tmp24, xmask) tl.store(out_ptr4 + x2, tmp30, xmask) tl.store(out_ptr5 + x2, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(256)]( buf0, primals_2, buf1, primals_6, buf2, buf3, buf4, buf7, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 return buf6, buf4, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0 ), buf2, buf3, buf4, buf5, buf7 class LSTMCellNew(Module): """ ## Long Short-Term Memory Cell LSTM Cell computes $c$, and $h$. $c$ is like the long-term memory, and $h$ is like the short term memory. We use the input $x$ and $h$ to update the long term memory. In the update, some features of $c$ are cleared with a forget gate $f$, and some features $i$ are added through a gate $g$. The new short term memory is the $\\tanh$ of the long-term memory multiplied by the output gate $o$. Note that the cell doesn't look at long term memory $c$ when doing the update. It only modifies it. Also $c$ never goes through a linear transformation. This is what solves vanishing and exploding gradients. Here's the update rule. \\begin{align} c_t &= \\sigma(f_t) \\odot c_{t-1} + \\sigma(i_t) \\odot \\tanh(g_t) \\\\ h_t &= \\sigma(o_t) \\odot \\tanh(c_t) \\end{align} $\\odot$ stands for element-wise multiplication. Intermediate values and gates are computed as linear transformations of the hidden state and input. \\begin{align} i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\\\ f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\\\ g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\\\ o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1}) \\end{align} """ def __init__(self, input_size: 'int', hidden_size: 'int', layer_norm: 'bool'=False): super().__init__() self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size) self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False) if layer_norm: self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(4)]) self.layer_norm_c = nn.LayerNorm(hidden_size) else: self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)]) self.layer_norm_c = nn.Identity() def forward(self, input_0, input_1, input_2): primals_1 = self.hidden_lin.weight primals_2 = self.hidden_lin.bias primals_4 = self.input_lin.weight primals_3 = input_0 primals_5 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
techthiyanes/annotated_deep_learning_paper_implementations
LSTMCell
false
16,560
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
InstanceNorm
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class InstanceNorm(Module): """ ## Instance Normalization Layer Instance normalization layer $\\text{IN}$ normalizes the input $X$ as follows: When input $X \\in \\mathbb{R}^{B \\times C \\times H \\times W}$ is a batch of image representations, where $B$ is the batch size, $C$ is the number of channels, $H$ is the height and $W$ is the width. $\\gamma \\in \\mathbb{R}^{C}$ and $\\beta \\in \\mathbb{R}^{C}$. The affine transformation with $gamma$ and $beta$ are optional. $$\\text{IN}(X) = \\gamma \\frac{X - \\underset{H, W}{\\mathbb{E}}[X]}{\\sqrt{\\underset{H, W}{Var}[X] + \\epsilon}} + \\beta$$ """ def __init__(self, channels: 'int', *, eps: float=1e-05, affine: bool=True ): """ * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[X] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() self.channels = channels self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(channels)) self.shift = nn.Parameter(torch.zeros(channels)) def forward(self, x: 'torch.Tensor'): """ `x` is a tensor of shape `[batch_size, channels, *]`. `*` denotes any number of (possibly 0) dimensions. For example, in an image (2D) convolution this will be `[batch_size, channels, height, width]` """ x_shape = x.shape batch_size = x_shape[0] assert self.channels == x.shape[1] x = x.view(batch_size, self.channels, -1) mean = x.mean(dim=[-1], keepdim=True) mean_x2 = (x ** 2).mean(dim=[-1], keepdim=True) var = mean_x2 - mean ** 2 x_norm = (x - mean) / torch.sqrt(var + self.eps) x_norm = x_norm.view(batch_size, self.channels, -1) if self.affine: x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1, -1, 1) return x_norm.view(x_shape) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_view_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 16.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp19 = tmp0 - tmp11 tmp20 = tmp19 / tmp17 tmp21 = tmp18 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_pow_sqrt_sub_view_0[grid(16)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 del primals_3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, buf1, buf3 class InstanceNormNew(Module): """ ## Instance Normalization Layer Instance normalization layer $\\text{IN}$ normalizes the input $X$ as follows: When input $X \\in \\mathbb{R}^{B \\times C \\times H \\times W}$ is a batch of image representations, where $B$ is the batch size, $C$ is the number of channels, $H$ is the height and $W$ is the width. $\\gamma \\in \\mathbb{R}^{C}$ and $\\beta \\in \\mathbb{R}^{C}$. The affine transformation with $gamma$ and $beta$ are optional. 
$$\\text{IN}(X) = \\gamma \\frac{X - \\underset{H, W}{\\mathbb{E}}[X]}{\\sqrt{\\underset{H, W}{Var}[X] + \\epsilon}} + \\beta$$ """ def __init__(self, channels: 'int', *, eps: float=1e-05, affine: bool=True ): """ * `channels` is the number of features in the input * `eps` is $\\epsilon$, used in $\\sqrt{Var[X] + \\epsilon}$ for numerical stability * `affine` is whether to scale and shift the normalized value """ super().__init__() self.channels = channels self.eps = eps self.affine = affine if self.affine: self.scale = nn.Parameter(torch.ones(channels)) self.shift = nn.Parameter(torch.zeros(channels)) def forward(self, input_0): primals_2 = self.scale primals_3 = self.shift primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
InstanceNorm
false
16,561
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
Conv2d
import torch from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd def weight_standardization(weight: 'torch.Tensor', eps: 'float'): """ ## Weight Standardization $$\\hat{W}_{i,j} = \\frac{W_{i,j} - \\mu_{W_{i,\\cdot}}} {\\sigma_{W_{i,\\cdot}}}$$ where, \\begin{align} W &\\in \\mathbb{R}^{O \\times I} \\\\ \\mu_{W_{i,\\cdot}} &= \\frac{1}{I} \\sum_{j=1}^I W_{i,j} \\\\ \\sigma_{W_{i,\\cdot}} &= \\sqrt{\\frac{1}{I} \\sum_{j=1}^I W^2_{i,j} - \\mu^2_{W_{i,\\cdot}} + \\epsilon} \\\\ \\end{align} for a 2D-convolution layer $O$ is the number of output channels ($O = C_{out}$) and $I$ is the number of input channels times the kernel size ($I = C_{in} \\times k_H \\times k_W$) """ c_out, c_in, *kernel_shape = weight.shape weight = weight.view(c_out, -1) var, mean = torch.var_mean(weight, dim=1, keepdim=True) weight = (weight - mean) / torch.sqrt(var + eps) return weight.view(c_out, c_in, *kernel_shape) class Conv2d(nn.Conv2d): """ ## 2D Convolution Layer This extends the standard 2D Convolution layer and standardize the weights before the convolution step. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups: 'int'=1, bias: 'bool'=True, padding_mode: 'str'='zeros', eps: 'float'=1e-05): super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias, padding_mode=padding_mode) self.eps = eps def forward(self, x: 'torch.Tensor'): return F.conv2d(x, weight_standardization(self.weight, self.eps), self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 63.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1), (1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3, primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf5 = extern_kernels.convolution(primals_3, reinterpret_tensor( buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf6, primals_1, primals_3, buf3, buf4 def weight_standardization(weight: 'torch.Tensor', eps: 'float'): """ ## Weight Standardization $$\\hat{W}_{i,j} = \\frac{W_{i,j} - \\mu_{W_{i,\\cdot}}} {\\sigma_{W_{i,\\cdot}}}$$ where, \\begin{align} W &\\in \\mathbb{R}^{O \\times I} \\\\ \\mu_{W_{i,\\cdot}} &= \\frac{1}{I} \\sum_{j=1}^I W_{i,j} \\\\ \\sigma_{W_{i,\\cdot}} &= \\sqrt{\\frac{1}{I} \\sum_{j=1}^I W^2_{i,j} - 
\\mu^2_{W_{i,\\cdot}} + \\epsilon} \\\\ \\end{align} for a 2D-convolution layer $O$ is the number of output channels ($O = C_{out}$) and $I$ is the number of input channels times the kernel size ($I = C_{in} \\times k_H \\times k_W$) """ c_out, c_in, *kernel_shape = weight.shape weight = weight.view(c_out, -1) var, mean = torch.var_mean(weight, dim=1, keepdim=True) weight = (weight - mean) / torch.sqrt(var + eps) return weight.view(c_out, c_in, *kernel_shape) class Conv2dNew(nn.Conv2d): """ ## 2D Convolution Layer This extends the standard 2D Convolution layer and standardize the weights before the convolution step. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups: 'int'=1, bias: 'bool'=True, padding_mode: 'str'='zeros', eps: 'float'=1e-05): super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode) self.eps = eps def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
Conv2d
false
16,562
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
SelfAttention
import torch class SelfAttention(torch.nn.Module): def __init__(self, num_heads, model_dim, dropout_keep_prob): super(SelfAttention, self).__init__() self.num_heads = num_heads self.model_dim = model_dim self.dropout_keep_prob = dropout_keep_prob self.q_layer = torch.nn.Linear(model_dim, model_dim * self. num_heads, bias=False) self.out_layer = torch.nn.Linear(model_dim * self.num_heads, model_dim, bias=False) self.out_layer2 = torch.nn.Linear(model_dim * 2, model_dim, bias=False) self.relu = torch.nn.ReLU() self.softmax = torch.nn.Softmax(dim=-1) self.dropout = torch.nn.Dropout(1 - dropout_keep_prob) def forward(self, batched_inputs, attn_mask=None): q = self._linear_projection(batched_inputs) qs = self._split_heads(q) tiled_inputs = batched_inputs.unsqueeze(1).repeat(1, self.num_heads, 1, 1) outputs = self._scaled_dot_product(qs, tiled_inputs, tiled_inputs, attn_mask) outputs = self._concat_heads(outputs) if self.num_heads > 1: outputs = self.out_layer(outputs) outputs = self.relu(outputs) outputs = torch.cat([outputs, batched_inputs], dim=-1) outputs = self.out_layer2(outputs) outputs = self.relu(outputs) return outputs def _linear_projection(self, batched_inputs): q = self.q_layer(batched_inputs) return q def _split_heads(self, q): def split_last_dimension_then_transpose(tensor, num_heads, dim): tensor = tensor.view([-1, tensor.size()[1], num_heads, dim]) return tensor.transpose(1, 2) qs = split_last_dimension_then_transpose(q, self.num_heads, self. model_dim) return qs def _scaled_dot_product(self, qs, ks, tiled_inputs, valid_mask): queries_dot_keys = torch.matmul(qs, ks.transpose(2, 3)) scaled_scores = queries_dot_keys if valid_mask is not None: mask = torch.log(valid_mask.view(valid_mask.size()[0], 1, 1, valid_mask.size()[1])) scaled_scores += mask attention_weights = self.softmax(scaled_scores) return torch.matmul(attention_weights, tiled_inputs) def _concat_heads(self, outputs): max_contexts = outputs.size()[2] tensor = outputs.transpose(1, 2) return tensor.contiguous().view([-1, max_contexts, self.model_dim * self.num_heads]) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_heads': 4, 'model_dim': 4, 'dropout_keep_prob': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(256)](primals_2, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = buf2 del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf6) buf7 = buf5 del buf5 triton_poi_fused_clone_1[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (16, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 8), (32, 8, 1), 
torch.float32) triton_poi_fused_cat_4[grid(128)](buf8, primals_2, buf9, 128, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (16, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_6[grid(64)](buf8, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 return buf11, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), buf1, buf3, reinterpret_tensor(buf7, (16, 16), (16, 1), 0 ), reinterpret_tensor(buf9, (16, 8), (8, 1), 0 ), buf12, primals_4, buf13, primals_3 class SelfAttentionNew(torch.nn.Module): def __init__(self, num_heads, model_dim, dropout_keep_prob): super(SelfAttentionNew, self).__init__() self.num_heads = num_heads self.model_dim = model_dim self.dropout_keep_prob = dropout_keep_prob self.q_layer = torch.nn.Linear(model_dim, model_dim * self. num_heads, bias=False) self.out_layer = torch.nn.Linear(model_dim * self.num_heads, model_dim, bias=False) self.out_layer2 = torch.nn.Linear(model_dim * 2, model_dim, bias=False) self.relu = torch.nn.ReLU() self.softmax = torch.nn.Softmax(dim=-1) self.dropout = torch.nn.Dropout(1 - dropout_keep_prob) def _linear_projection(self, batched_inputs): q = self.q_layer(batched_inputs) return q def _split_heads(self, q): def split_last_dimension_then_transpose(tensor, num_heads, dim): tensor = tensor.view([-1, tensor.size()[1], num_heads, dim]) return tensor.transpose(1, 2) qs = split_last_dimension_then_transpose(q, self.num_heads, self. model_dim) return qs def _scaled_dot_product(self, qs, ks, tiled_inputs, valid_mask): queries_dot_keys = torch.matmul(qs, ks.transpose(2, 3)) scaled_scores = queries_dot_keys if valid_mask is not None: mask = torch.log(valid_mask.view(valid_mask.size()[0], 1, 1, valid_mask.size()[1])) scaled_scores += mask attention_weights = self.softmax(scaled_scores) return torch.matmul(attention_weights, tiled_inputs) def _concat_heads(self, outputs): max_contexts = outputs.size()[2] tensor = outputs.transpose(1, 2) return tensor.contiguous().view([-1, max_contexts, self.model_dim * self.num_heads]) def forward(self, input_0): primals_1 = self.q_layer.weight primals_3 = self.out_layer.weight primals_4 = self.out_layer2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
tech-srl/bottleneck
SelfAttention
false
16,563
[ "MIT" ]
56
b8c629ad25e02f53ba3389dd33a90bbeb83ea447
https://github.com/tech-srl/bottleneck/tree/b8c629ad25e02f53ba3389dd33a90bbeb83ea447
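A minimal smoke test for the fused SelfAttentionNew above, written as a sketch: the init arguments (num_heads=4, model_dim=4) and the (4, 4, 4) input shape are assumptions read off the assert_size_stride guards in call(), and a CUDA device is required because the generated kernels only run on CUDA.

import torch

# Hypothetical smoke test: run one forward pass through the Triton-backed
# module on shapes matching the guards in call().
model = SelfAttentionNew(num_heads=4, model_dim=4, dropout_keep_prob=1.0).cuda()
x = torch.rand(4, 4, 4, device='cuda')    # [batch, max_contexts, model_dim], inferred
with torch.no_grad():
    out = model(x)
print(out.shape)                          # expected: torch.Size([4, 4, 4])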
EnDeWithPooling
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class EnDeWithPooling(nn.Module): def __init__(self, activation, initType, numChannels, batchnorm=False, softmax=False): super(EnDeWithPooling, self).__init__() self.batchnorm = batchnorm self.bias = not batchnorm self.initType = initType self.activation = None self.numChannels = numChannels self.softmax = softmax if activation == 'relu': self.activation = nn.ReLU(inplace=True) else: self.activation = nn.SELU(inplace=True) self.conv1 = nn.Conv2d(self.numChannels, 16, 3, 1, 1, bias=self.bias) self.conv2 = nn.Conv2d(16, 32, 3, 1, 1, bias=self.bias) self.conv3 = nn.Conv2d(32, 64, 3, 1, 1, bias=self.bias) self.deconv3 = nn.ConvTranspose2d(64, 32, 3, 2, 1, 1) self.deconv2 = nn.ConvTranspose2d(32, 16, 3, 2, 1, 1) self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, 1) self.classifier = nn.Conv2d(8, 1, 1) self.pool = nn.MaxPool2d(2, 2) self.intermediate = nn.Conv2d(64, 64, 1, 1, 0, bias=self.bias) self.skip1 = nn.Conv2d(16, 16, 1, 1, 0, bias=self.bias) self.skip2 = nn.Conv2d(32, 32, 1, 1, 0, bias=self.bias) if self.batchnorm: self.bn1 = nn.BatchNorm2d(16) self.bn2 = nn.BatchNorm2d(32) self.bn3 = nn.BatchNorm2d(64) self.bn4 = nn.BatchNorm2d(32) self.bn5 = nn.BatchNorm2d(16) self.bn6 = nn.BatchNorm2d(8) def forward(self, x): if self.batchnorm: conv1_ = self.pool(self.bn1(self.activation(self.conv1(x)))) conv2_ = self.pool(self.bn2(self.activation(self.conv2(conv1_)))) conv3_ = self.pool(self.bn3(self.activation(self.conv3(conv2_)))) intermediate_ = self.activation(self.intermediate(conv3_)) skip_deconv3_ = self.deconv3(intermediate_) + self.activation(self .skip2(conv2_)) deconv3_ = self.bn4(self.activation(skip_deconv3_)) skip_deconv2_ = self.deconv2(deconv3_) + self.activation(self. skip1(conv1_)) deconv2_ = self.bn5(self.activation(skip_deconv2_)) deconv1_ = self.bn6(self.activation(self.deconv1(deconv2_))) score = self.classifier(deconv1_) else: conv1_ = self.pool(self.activation(self.conv1(x))) conv2_ = self.pool(self.activation(self.conv2(conv1_))) conv3_ = self.pool(self.activation(self.conv3(conv2_))) intermediate_ = self.activation(self.intermediate(conv3_)) skip_deconv3_ = self.deconv3(intermediate_) + self.activation(self .skip2(conv2_)) deconv3_ = self.activation(skip_deconv3_) skip_deconv2_ = self.deconv2(deconv3_) + self.activation(self. skip1(conv1_)) deconv2_ = self.activation(skip_deconv2_) deconv1_ = self.activation(self.deconv1(deconv2_)) if self.softmax: score = F.softmax(self.classifier(deconv1_), dim=1) else: score = self.classifier(deconv1_) return score def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): if self.initType == 'default': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, np.sqrt(2.0 / n)) elif self.initType == 'xavier': nn.init.xavier_normal_(m.weight.data) if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.ConvTranspose2d): if self.initType == 'default': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, np.sqrt(2.0 / n)) elif self.initType == 'xavier': nn.init.xavier_normal_(m.weight.data) if m.bias is not None: m.bias.data.zero_() def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'activation': 4, 'initType': 4, 'numChannels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = 
triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_add_convolution_elu_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, None) tmp4 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0507009873554805 tmp9 = tmp2 * tmp8 tmp10 = 1.0 tmp11 = tmp2 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = 
1.7580993408473766 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp7, tmp9, tmp14) tmp16 = tmp5 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tmp16 * tmp10 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp13 tmp22 = tl.where(tmp17, tmp18, tmp21) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp22, None) @triton.jit def triton_poi_fused_add_convolution_elu_8(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, None) tmp4 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0507009873554805 tmp9 = tmp2 * tmp8 tmp10 = 1.0 tmp11 = tmp2 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = 1.7580993408473766 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp7, tmp9, tmp14) tmp16 = tmp5 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tmp16 * tmp10 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp13 tmp22 = tl.where(tmp17, tmp18, tmp21) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp22, None) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_13, (32,), (1,)) assert_size_stride(primals_14, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (16,), (1,)) assert_size_stride(primals_16, (16, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_17, (16,), (1,)) 
assert_size_stride(primals_18, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_19, (8,), (1,)) assert_size_stride(primals_20, (1, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_21, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2, buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_elu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_elu_4[grid(65536)](buf9, primals_7, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch. 
float32) buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf9, buf10, buf11, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_elu_6[grid(16384)](buf13, primals_9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf14, (4, 32, 16, 16), (8192, 256, 16, 1)) buf15 = extern_kernels.convolution(buf6, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 32, 16, 16), (8192, 256, 16, 1)) buf16 = buf15 del buf15 buf17 = buf14 del buf14 triton_poi_fused_add_convolution_elu_7[grid(32768)](buf16, buf17, primals_13, primals_11, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 del primals_13 buf18 = extern_kernels.convolution(buf17, primals_14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf18, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf19 = extern_kernels.convolution(buf2, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf20 = buf19 del buf19 buf21 = buf18 del buf18 triton_poi_fused_add_convolution_elu_8[grid(65536)](buf20, buf21, primals_17, primals_15, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_elu_9[grid(131072)](buf23, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_10[grid(16384)](buf25, primals_21, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 return (buf25, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf16, buf17, buf20, buf21, buf23) class EnDeWithPoolingNew(nn.Module): def __init__(self, activation, initType, numChannels, batchnorm=False, softmax=False): super(EnDeWithPoolingNew, self).__init__() self.batchnorm = batchnorm self.bias = not batchnorm self.initType = initType self.activation = None self.numChannels = numChannels self.softmax = softmax if activation == 'relu': self.activation = nn.ReLU(inplace=True) else: self.activation = nn.SELU(inplace=True) self.conv1 = nn.Conv2d(self.numChannels, 16, 3, 1, 1, bias=self.bias) self.conv2 = nn.Conv2d(16, 32, 3, 1, 1, bias=self.bias) self.conv3 = nn.Conv2d(32, 64, 3, 1, 1, bias=self.bias) self.deconv3 = 
nn.ConvTranspose2d(64, 32, 3, 2, 1, 1) self.deconv2 = nn.ConvTranspose2d(32, 16, 3, 2, 1, 1) self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, 1) self.classifier = nn.Conv2d(8, 1, 1) self.pool = nn.MaxPool2d(2, 2) self.intermediate = nn.Conv2d(64, 64, 1, 1, 0, bias=self.bias) self.skip1 = nn.Conv2d(16, 16, 1, 1, 0, bias=self.bias) self.skip2 = nn.Conv2d(32, 32, 1, 1, 0, bias=self.bias) if self.batchnorm: self.bn1 = nn.BatchNorm2d(16) self.bn2 = nn.BatchNorm2d(32) self.bn3 = nn.BatchNorm2d(64) self.bn4 = nn.BatchNorm2d(32) self.bn5 = nn.BatchNorm2d(16) self.bn6 = nn.BatchNorm2d(8) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): if self.initType == 'default': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, np.sqrt(2.0 / n)) elif self.initType == 'xavier': nn.init.xavier_normal_(m.weight.data) if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.ConvTranspose2d): if self.initType == 'default': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, np.sqrt(2.0 / n)) elif self.initType == 'xavier': nn.init.xavier_normal_(m.weight.data) if m.bias is not None: m.bias.data.zero_() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_10 = self.deconv3.weight primals_11 = self.deconv3.bias primals_14 = self.deconv2.weight primals_15 = self.deconv2.bias primals_18 = self.deconv1.weight primals_19 = self.deconv1.bias primals_20 = self.classifier.weight primals_21 = self.classifier.bias primals_8 = self.intermediate.weight primals_9 = self.intermediate.bias primals_16 = self.skip1.weight primals_17 = self.skip1.bias primals_12 = self.skip2.weight primals_13 = self.skip2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
talsperre/INFER
EnDeWithPooling
false
16,564
[ "MIT" ]
56
38fb2356700c5a92991788b7eb9a267c99a07c5b
https://github.com/talsperre/INFER/tree/38fb2356700c5a92991788b7eb9a267c99a07c5b
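A sketch of how the compiled EnDeWithPoolingNew could be checked against the eager EnDeWithPooling from this entry; the 'selu'/'xavier' strings are stand-ins (any activation other than 'relu' selects the SELU path that the fused kernels hard-code), the weight copy relies on both classes exposing identical submodule names, and the tolerance is a guess.

import torch

# Assumed equivalence check: share one set of weights, then compare the eager
# and Triton-compiled outputs on the input shape from get_inputs().
torch.manual_seed(0)
eager = EnDeWithPooling(activation='selu', initType='xavier', numChannels=4).cuda()
fused = EnDeWithPoolingNew(activation='selu', initType='xavier', numChannels=4).cuda()
fused.load_state_dict(eager.state_dict())    # same parameter names in both classes
x = torch.rand(4, 4, 64, 64, device='cuda')  # shape from get_inputs()
with torch.no_grad():
    ref = eager(x)
    out = fused(x)
print(ref.shape, torch.allclose(ref, out, atol=1e-5))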
SpatialDepthWiseSharedConvolution
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SpatialDepthWiseSharedConvolution(Module): """ ## Spatial Depth Wise Shared Convolution We share the same kernel across all channels. """ def __init__(self, kernel_size: 'int'=3): """ """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=( kernel_size,), padding=(kernel_size - 1,)) def forward(self, x: 'torch.Tensor'): """ `x` has shape `[seq_len, batch_size, heads, d_k]` """ seq_len, batch_size, heads, d_k = x.shape x = x.permute(1, 2, 3, 0) x = x.view(batch_size * heads * d_k, 1, seq_len) x = self.conv(x) x = x[:, :, :-(self.kernel_size - 1)] x = x.view(batch_size, heads, d_k, seq_len) x = x.permute(3, 0, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (64, 1, 6), (6, 6, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(384)](buf2, primals_3, 384, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0 ), primals_2, reinterpret_tensor(primals_1, (64, 1, 4), (1, 256, 64), 0 ) class SpatialDepthWiseSharedConvolutionNew(Module): """ ## Spatial Depth Wise Shared Convolution We share the same kernel across all channels. """ def __init__(self, kernel_size: 'int'=3): """ """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=( kernel_size,), padding=(kernel_size - 1,)) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SpatialDepthWiseSharedConvolution
false
16,565
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
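A small property check, offered as a sketch: because the convolution pads by kernel_size - 1 and trims the tail, the output at a given time step should depend only on current and earlier steps, so perturbing the last step must leave earlier outputs unchanged. Shapes follow get_inputs().

import torch

# Causality check for the shared depth-wise convolution (sketch).
torch.manual_seed(0)
m = SpatialDepthWiseSharedConvolutionNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')    # [seq_len, batch_size, heads, d_k]
x2 = x.clone()
x2[-1] += 1.0                                 # perturb only the final time step
with torch.no_grad():
    y = m(x)
    y2 = m(x2)
print(y.shape)                                # torch.Size([4, 4, 4, 4])
print(torch.allclose(y[:-1], y2[:-1]))        # True: earlier steps are untouched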
DownSample
import torch from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) class DownSample(nn.Module): """ <a id="down_sample"></a> ### Down-sample The down-sample operation [smoothens](#smooth) each feature channel and scale $2 imes$ using bilinear interpolation. This is based on the paper [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486). """ def __init__(self): super().__init__() self.smooth = Smooth() def forward(self, x: 'torch.Tensor'): x = self.smooth(x) return F.interpolate(x, (x.shape[2] // 2, x.shape[3] // 2), mode= 'bilinear', align_corners=False) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x2 = xindex // 4 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + x3, tmp44, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(576)](arg0_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, 
stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf4 = buf2 del buf2 buf5 = buf4 del buf4 triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1[grid (64)](buf5, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf5, class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) class DownSampleNew(nn.Module): """ <a id="down_sample"></a> ### Down-sample The down-sample operation [smoothens](#smooth) each feature channel and scale $2 imes$ using bilinear interpolation. This is based on the paper [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486). """ def __init__(self): super().__init__() self.smooth = Smooth() def forward(self, input_0): arg1_1 = self.smooth.kernel arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
DownSample
false
16,566
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
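Minimal usage sketch for DownSampleNew: smoothing followed by bilinear resizing should halve both spatial dimensions. The input shape comes from get_inputs(); CUDA is assumed since the kernels are CUDA-only.

import torch

# Down-sampling halves H and W (sketch).
m = DownSampleNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y = m(x)
print(tuple(x.shape), '->', tuple(y.shape))   # (4, 4, 4, 4) -> (4, 4, 2, 2)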
Smooth
import torch from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(576)](arg0_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), class SmoothNew(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, input_0): arg1_1 = self.kernel arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
Smooth
false
16,567
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
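A quick property sketch for the smoothing layer: the 3x3 binomial kernel is normalized to sum to one and the replication padding preserves constants, so a constant image should come back (numerically) unchanged.

import torch

# Constant images are fixed points of the normalized blur (sketch).
m = SmoothNew().cuda()
x = torch.full((4, 4, 4, 4), 0.5, device='cuda')
with torch.no_grad():
    y = m(x)
print(torch.allclose(y, x, atol=1e-6))        # True
print(m.kernel.squeeze())                     # [[0.0625, 0.1250, 0.0625], ...], sums to 1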
ATLoss
import torch from torch import Tensor import torch.nn as nn import torch.nn.functional as F class ATLoss(nn.Module): def __init__(self): super().__init__() def forward(self, logits: 'Tensor', labels: 'Tensor') ->float: """ Args: logits: predicted probabilities (shape: batch size x num classes) labels: one-hot encoded true labels (shape: batch size x num classes) """ th_label = torch.zeros_like(labels, dtype=torch.float) th_label[:, 0] = 1.0 labels[:, 0] = 0.0 p_mask = labels + th_label n_mask = 1 - labels logit1 = logits - (1 - p_mask) * 1e+30 loss1 = -(F.log_softmax(logit1, dim=-1) * labels).sum(1) logit2 = logits - (1 - n_mask) * 1e+30 loss2 = -(F.log_softmax(logit2, dim=-1) * th_label).sum(1) loss = loss1 + loss2 loss = loss.mean() return loss def get_label(self, logits: 'Tensor', num_labels: 'int'=-1, threshold: 'float'=None) ->Tensor: """ Calculated the labels """ if threshold: th_logit = torch.full((len(logits), 1), threshold) else: th_logit = logits[:, 0].unsqueeze(1) output = torch.zeros_like(logits) mask = logits > th_logit if num_labels > 0: top_v, _ = torch.topk(logits, num_labels, dim=1) top_v = top_v[:, -1] mask = (logits >= top_v.unsqueeze(1)) & mask output[mask] = 1.0 output[:, 0] = output.sum(1) == 0.0 return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 tmp0 = 0.0 tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1( in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = x1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp5 = 1.0 tmp6 = 0.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp1 + tmp7 tmp9 = tmp5 - tmp8 tmp10 = 1e+30 tmp11 = tmp9 * tmp10 tmp12 = tmp0 - tmp11 tmp15 = tmp14 + tmp7 tmp16 = tmp5 - tmp15 tmp17 = tmp16 * tmp10 tmp18 = tmp13 - tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp22 = tmp21 + tmp7 tmp23 = tmp5 - tmp22 tmp24 = tmp23 * tmp10 tmp25 = tmp20 - tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp29 = tmp28 + tmp7 tmp30 = tmp5 - tmp29 tmp31 = tmp30 * tmp10 tmp32 = tmp27 - tmp31 tmp33 = triton_helpers.maximum(tmp26, tmp32) tmp34 = tmp12 - tmp33 tmp35 = tl_math.exp(tmp34) tmp36 = tmp18 - tmp33 tmp37 = tl_math.exp(tmp36) tmp38 = tmp35 + tmp37 tmp39 = tmp25 - tmp33 tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tmp42 = tmp32 - tmp33 tmp43 = tl_math.exp(tmp42) tmp44 = tmp41 + tmp43 tmp45 = tmp5 - tmp1 tmp46 = tmp5 - tmp45 tmp47 = tmp46 * tmp10 tmp48 = tmp0 - tmp47 tmp49 = tmp5 - tmp14 tmp50 = tmp5 - tmp49 tmp51 = tmp50 * tmp10 tmp52 = tmp13 - tmp51 tmp53 = triton_helpers.maximum(tmp48, tmp52) tmp54 = tmp5 - tmp21 tmp55 = tmp5 - tmp54 tmp56 = tmp55 * tmp10 tmp57 = tmp20 - tmp56 tmp58 = triton_helpers.maximum(tmp53, tmp57) tmp59 = tmp5 - tmp28 tmp60 = tmp5 - tmp59 tmp61 = tmp60 * tmp10 tmp62 = tmp27 - tmp61 tmp63 = triton_helpers.maximum(tmp58, tmp62) tmp64 = tmp48 - tmp63 tmp65 = tl_math.exp(tmp64) tmp66 = tmp52 - tmp63 tmp67 = tl_math.exp(tmp66) tmp68 = tmp65 + tmp67 tmp69 = tmp57 - tmp63 tmp70 = tl_math.exp(tmp69) tmp71 = tmp68 + tmp70 tmp72 = tmp62 - tmp63 tmp73 = tl_math.exp(tmp72) tmp74 = tmp71 + tmp73 tl.store(out_ptr0 + x3, tmp33, xmask) tl.store(out_ptr1 + x3, tmp44, xmask) tl.store(out_ptr2 + x3, tmp63, xmask) tl.store(out_ptr3 + x3, tmp74, xmask) @triton.jit def 
triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 16 r4 = rindex % 16 r1 = rindex // 4 % 4 tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None) tmp1 = tl.load(in_ptr1 + (r4 + 64 * r2), None) tmp12 = tl.load(in_ptr2 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr3 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None) tmp19 = tl.load(in_ptr1 + (16 + r4 + 64 * r2), None) tmp27 = tl.load(in_ptr2 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr3 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None) tmp35 = tl.load(in_ptr1 + (32 + r4 + 64 * r2), None) tmp43 = tl.load(in_ptr2 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp45 = tl.load(in_ptr3 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None) tmp51 = tl.load(in_ptr1 + (48 + r4 + 64 * r2), None) tmp59 = tl.load(in_ptr2 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp61 = tl.load(in_ptr3 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr4 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp72 = tl.load(in_ptr5 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp80 = tl.load(in_ptr4 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp82 = tl.load(in_ptr5 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp91 = tl.load(in_ptr4 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp93 = tl.load(in_ptr5 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp102 = tl.load(in_ptr4 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp104 = tl.load(in_ptr5 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = tmp2 == tmp2 tmp4 = 1.0 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp1 + tmp6 tmp8 = tmp4 - tmp7 tmp9 = 1e+30 tmp10 = tmp8 * tmp9 tmp11 = tmp0 - tmp10 tmp13 = tmp11 - tmp12 tmp15 = tl_math.log(tmp14) tmp16 = tmp13 - tmp15 tmp17 = tmp16 * tmp1 tmp20 = tl.full([1, 1], 1, tl.int32) tmp21 = tmp20 == tmp2 tmp22 = tl.where(tmp21, tmp4, tmp5) tmp23 = tmp19 + tmp22 tmp24 = tmp4 - tmp23 tmp25 = tmp24 * tmp9 tmp26 = tmp18 - tmp25 tmp28 = tmp26 - tmp27 tmp30 = tl_math.log(tmp29) tmp31 = tmp28 - tmp30 tmp32 = tmp31 * tmp19 tmp33 = tmp17 + tmp32 tmp36 = tl.full([1, 1], 2, tl.int32) tmp37 = tmp36 == tmp2 tmp38 = tl.where(tmp37, tmp4, tmp5) tmp39 = tmp35 + tmp38 tmp40 = tmp4 - tmp39 tmp41 = tmp40 * tmp9 tmp42 = tmp34 - tmp41 tmp44 = tmp42 - tmp43 tmp46 = tl_math.log(tmp45) tmp47 = tmp44 - tmp46 tmp48 = tmp47 * tmp35 tmp49 = tmp33 + tmp48 tmp52 = tl.full([1, 1], 3, tl.int32) tmp53 = tmp52 == tmp2 tmp54 = tl.where(tmp53, tmp4, tmp5) tmp55 = tmp51 + tmp54 tmp56 = tmp4 - tmp55 tmp57 = tmp56 * tmp9 tmp58 = tmp50 - tmp57 tmp60 = tmp58 - tmp59 tmp62 = tl_math.log(tmp61) tmp63 = tmp60 - tmp62 tmp64 = tmp63 * tmp51 tmp65 = tmp49 + tmp64 tmp66 = tmp4 - tmp1 tmp67 = tmp4 - tmp66 tmp68 = tmp67 * tmp9 tmp69 = tmp0 - tmp68 tmp71 = tmp69 - tmp70 tmp73 = tl_math.log(tmp72) tmp74 = tmp71 - tmp73 tmp75 = 
tmp74 * tmp6 tmp76 = tmp4 - tmp19 tmp77 = tmp4 - tmp76 tmp78 = tmp77 * tmp9 tmp79 = tmp18 - tmp78 tmp81 = tmp79 - tmp80 tmp83 = tl_math.log(tmp82) tmp84 = tmp81 - tmp83 tmp85 = tmp84 * tmp22 tmp86 = tmp75 + tmp85 tmp87 = tmp4 - tmp35 tmp88 = tmp4 - tmp87 tmp89 = tmp88 * tmp9 tmp90 = tmp34 - tmp89 tmp92 = tmp90 - tmp91 tmp94 = tl_math.log(tmp93) tmp95 = tmp92 - tmp94 tmp96 = tmp95 * tmp38 tmp97 = tmp86 + tmp96 tmp98 = tmp4 - tmp51 tmp99 = tmp4 - tmp98 tmp100 = tmp99 * tmp9 tmp101 = tmp50 - tmp100 tmp103 = tmp101 - tmp102 tmp105 = tl_math.log(tmp104) tmp106 = tmp103 - tmp105 tmp107 = tmp106 * tmp54 tmp108 = tmp97 + tmp107 tmp109 = -tmp65 tmp110 = -tmp108 tmp111 = tmp109 + tmp110 tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK]) tmp114 = tl.sum(tmp112, 1)[:, None] tmp115 = 64.0 tmp116 = tmp114 / tmp115 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp116, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_fill_lift_fresh_0[grid(64)](arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1[ grid(64)](arg1_1, arg0_1, buf1, buf2, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7 del buf7 triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2[ grid(1)](buf8, arg1_1, arg0_1, buf1, buf2, buf4, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 del buf2 del buf4 del buf5 return buf8, class ATLossNew(nn.Module): def __init__(self): super().__init__() def get_label(self, logits: 'Tensor', num_labels: 'int'=-1, threshold: 'float'=None) ->Tensor: """ Calculated the labels """ if threshold: th_logit = torch.full((len(logits), 1), threshold) else: th_logit = logits[:, 0].unsqueeze(1) output = torch.zeros_like(logits) mask = logits > th_logit if num_labels > 0: top_v, _ = torch.topk(logits, num_labels, dim=1) top_v = top_v[:, -1] mask = (logits >= top_v.unsqueeze(1)) & mask output[mask] = 1.0 output[:, 0] = output.sum(1) == 0.0 return output def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/DeepPavlov
ATLoss
false
16,568
[ "Apache-2.0" ]
5,893
08555428388fed3c7b036c0a82a70a25efcabcff
https://github.com/techthiyanes/DeepPavlov/tree/08555428388fed3c7b036c0a82a70a25efcabcff
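A sketch of the adaptive-threshold decoding that get_label implements (the method is identical in ATLoss and ATLossNew): class 0 serves as a per-example threshold, a class is predicted only when its logit exceeds the class-0 logit, and the threshold class itself fires when nothing does. The numbers below are made up for illustration.

import torch

# get_label turns logits into multi-label predictions using class 0 as the threshold.
loss_fn = ATLossNew()
logits = torch.tensor([[0.2, 1.5, -0.3, 0.9],
                       [0.8, 0.1,  0.4, 0.2]])
print(loss_fn.get_label(logits))
# row 0: classes 1 and 3 beat the 0.2 threshold -> [0., 1., 0., 1.]
# row 1: nothing beats 0.8, so the threshold class fires -> [1., 0., 0., 0.]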
SpatialDepthWisePerHeadConvolution
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SpatialDepthWisePerHeadConvolution(Module): """ ## Spatial Depth Wise Per Head Convolution """ def __init__(self, heads: 'int', d_k: 'int', kernel_size: 'int'=3): """ * `heads` is the number of heads * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k * heads, kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k * heads) def forward(self, x: 'torch.Tensor'): """ `x` has shape `[seq_len, batch_size, heads, d_k]` """ seq_len, batch_size, heads, d_k = x.shape x = x.permute(1, 2, 3, 0) x = x.view(batch_size, heads * d_k, seq_len) x = self.conv(x) x = x[:, :, :-(self.kernel_size - 1)] x = x.view(batch_size, heads, d_k, seq_len) x = x.permute(3, 0, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'heads': 4, 'd_k': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=16, bias=None) assert_size_stride(buf1, (4, 16, 6), (96, 6, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(384)](buf2, primals_3, 384, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (16, 1, 64), 0) class SpatialDepthWisePerHeadConvolutionNew(Module): """ ## Spatial Depth Wise Per Head Convolution """ def __init__(self, heads: 'int', d_k: 'int', kernel_size: 'int'=3): """ * `heads` is the number of heads * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k * heads, kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k * heads) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SpatialDepthWisePerHeadConvolution
false
16,569
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
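Usage sketch highlighting the difference from the shared variant above: here every one of the heads * d_k channels gets its own 1D kernel (groups = d_k * heads), so for heads=4, d_k=4 the weight tensor is (16, 1, 3). Shapes follow get_inputs()/get_init_inputs().

import torch

# Per-channel (grouped) depth-wise convolution over the sequence axis (sketch).
m = SpatialDepthWisePerHeadConvolutionNew(heads=4, d_k=4).cuda()
print(m.conv.weight.shape)                    # torch.Size([16, 1, 3])
x = torch.rand(4, 4, 4, 4, device='cuda')     # [seq_len, batch_size, heads, d_k]
with torch.no_grad():
    y = m(x)
print(y.shape)                                # torch.Size([4, 4, 4, 4])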
Squash
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class Squash(Module): '\n ## Squash\n\n This is **squashing** function from paper, given by equation $(1)$.\n\n $$\\mathbf{v}_j = \x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}\n \x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$$\n\n $\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$\n normalizes the length of all the capsules, whilst\n $\x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}$\n shrinks the capsules that have a length smaller than one .\n ' def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, s: 'torch.Tensor'): """ The shape of `s` is `[batch_size, n_capsules, n_features]` """ s2 = (s ** 2).sum(dim=-1, keepdims=True) return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SquashNew(Module): '\n ## Squash\n\n This is **squashing** function from paper, given by equation $(1)$.\n\n $$\\mathbf{v}_j = \x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}\n \x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$$\n\n $\x0crac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \rVert}$\n normalizes the length of all the capsules, whilst\n $\x0crac{{\\lVert \\mathbf{s}_j \rVert}^2}{1 + {\\lVert \\mathbf{s}_j \rVert}^2}$\n shrinks the capsules that have a length smaller than one .\n ' def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
Squash
false
16,570
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
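Property sketch for the squashing non-linearity: the direction of every capsule vector is preserved while its norm is compressed into [0, 1) by the factor |s|^2 / (1 + |s|^2). Shapes follow get_inputs().

import torch

# Squash keeps directions and bounds norms below 1 (sketch).
m = SquashNew().cuda()
s = torch.rand(4, 4, 4, 4, device='cuda') * 5.0
with torch.no_grad():
    v = m(s)
print(v.norm(dim=-1).max().item() < 1.0)      # True
cos = torch.nn.functional.cosine_similarity(v, s, dim=-1)
print(bool(torch.allclose(cos, torch.ones_like(cos), atol=1e-5)))  # True: same direction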
SpacialGatingUnit
import torch from torch import nn import torch.utils.data from typing import Optional import torch.nn.functional import torch.autograd class SpacialGatingUnit(nn.Module): """ ## Spatial Gating Unit $$s(Z) = Z_1 \\odot f_{W,b}(Z_2)$$ where $f_{W,b}(Z) = W Z + b$ is a linear transformation along the sequence dimension, and $\\odot$ is element-wise multiplication. $Z$ is split into to parts of equal size $Z_1$ and $Z_2$ along the channel dimension (embedding dimension). """ def __init__(self, d_z: 'int', seq_len: 'int'): """ * `d_z` is the dimensionality of $Z$ * `seq_len` is the sequence length """ super().__init__() self.norm = nn.LayerNorm([d_z // 2]) self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(- 0.01, 0.01), requires_grad=True) self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True) def forward(self, z: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None): """ * `z` is the input $Z$ of shape `[seq_len, batch_size, d_z]` * `mask` is is a boolean mask of shape `[seq_len, seq_len, 1]` that controls the visibility of tokens among each other. The last dimension of size `1` is the batch, which we have in other transformer implementations and was left for compatibility. """ seq_len = z.shape[0] z1, z2 = torch.chunk(z, 2, dim=-1) if mask is not None: assert mask.shape[0] == 1 or mask.shape[0] == seq_len assert mask.shape[1] == seq_len assert mask.shape[2] == 1 mask = mask[:, :, 0] z2 = self.norm(z2) weight = self.weight[:seq_len, :seq_len] if mask is not None: weight = weight * mask z2 = torch.einsum('ij,jbd->ibd', weight, z2) + self.bias[:seq_len, None, None] return z1 * z2 def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_z': 4, 'seq_len': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x3 = xindex // 2 x4 = xindex x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x3), xmask) tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(32)](primals_1, buf0, primals_2, primals_3, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf0 del primals_2 del primals_3 buf3 = empty_strided_cuda((1, 4, 8), (32, 8, 1), 
torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (1, 4, 8), (0, 8, 1), 0), out=buf3 ) buf4 = reinterpret_tensor(buf3, (4, 4, 2), (8, 2, 1), 0) del buf3 triton_poi_fused_add_mul_2[grid(32)](buf4, primals_1, primals_5, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 return buf4, reinterpret_tensor(primals_1, (4, 4, 2), (16, 4, 1), 0 ), buf1, reinterpret_tensor(primals_4, (1, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (1, 8, 4), (32, 1, 8), 0) class SpacialGatingUnitNew(nn.Module): """ ## Spatial Gating Unit $$s(Z) = Z_1 \\odot f_{W,b}(Z_2)$$ where $f_{W,b}(Z) = W Z + b$ is a linear transformation along the sequence dimension, and $\\odot$ is element-wise multiplication. $Z$ is split into to parts of equal size $Z_1$ and $Z_2$ along the channel dimension (embedding dimension). """ def __init__(self, d_z: 'int', seq_len: 'int'): """ * `d_z` is the dimensionality of $Z$ * `seq_len` is the sequence length """ super().__init__() self.norm = nn.LayerNorm([d_z // 2]) self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(- 0.01, 0.01), requires_grad=True) self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True) def forward(self, input_0): primals_4 = self.weight primals_5 = self.bias primals_2 = self.norm.weight primals_3 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SpacialGatingUnit
false
16,571
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
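A compact eager sketch of the computation that the SpacialGatingUnit row above fuses: split the channels, normalize one half with LayerNorm, mix it along the sequence axis with a learned [seq_len, seq_len] matrix, and use it to gate the other half. The sizes below and the omission of the optional mask are illustrative assumptions.

import torch
from torch import nn

seq_len, batch_size, d_z = 4, 2, 8
z = torch.rand(seq_len, batch_size, d_z)

norm = nn.LayerNorm(d_z // 2)
weight = torch.zeros(seq_len, seq_len).uniform_(-0.01, 0.01)
bias = torch.ones(seq_len)

z1, z2 = torch.chunk(z, 2, dim=-1)       # two halves of shape [seq_len, batch, d_z/2]
z2 = norm(z2)                            # normalize the gating half
z2 = torch.einsum('ij,jbd->ibd', weight, z2) + bias[:, None, None]  # mix along the sequence axis
out = z1 * z2                            # element-wise gate
print(out.shape)                         # torch.Size([4, 2, 4])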
SpatialDepthWiseConvolution
from torch.nn import Module import math import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SpatialDepthWiseConvolution(Module): """ ## Spatial Depth Wise Convolution This is actually slower """ def __init__(self, d_k: 'int', kernel_size: 'int'=3): """ * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size rng = 1 / math.sqrt(kernel_size) self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)). uniform_(-rng, rng)) def forward(self, x: 'torch.Tensor'): """ `x` has shape `[seq_len, batch_size, heads, d_k]` """ res = x * self.kernels[0].view(1, 1, 1, -1) for i in range(1, len(self.kernels)): res[i:] += x[:-i] * self.kernels[i].view(1, 1, 1, -1) return res def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_k': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import math from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x4 = xindex x0 = xindex % 4 tmp61 = tl.load(in_ptr0 + x4, xmask) tmp62 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 >= tmp3 tmp5 = tmp4 & tmp2 tmp6 = tmp4 & tmp5 tmp7 = tl.load(in_ptr0 + x4, tmp6 & xmask, other=0.0) tmp8 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + (-64 + x4), tmp6 & xmask, other=0.0) tmp11 = tl.load(in_ptr1 + (4 + x0), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.load(in_ptr0 + x4, tmp5 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x0, tmp5 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.where(tmp4, tmp15, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp5, tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (-64 + x4), tmp5 & xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (4 + x0), tmp5 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp5, tmp25, tmp26) tmp28 = tl.load(in_ptr0 + x4, tmp2 & xmask, other=0.0) tmp29 = tl.load(in_ptr1 + x0, tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp28 * tmp29 tmp31 = tl.where(tmp4, tmp27, tmp30) tmp32 = tl.where(tmp4, tmp21, tmp31) tmp33 = tl.load(in_ptr0 + (-128 + x4), tmp2 & xmask, other=0.0) tmp34 = tl.load(in_ptr1 + (8 + x0), tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp4 & tmp4 tmp40 = tl.load(in_ptr0 + x4, tmp39 & xmask, other=0.0) tmp41 = tl.load(in_ptr1 + x0, tmp39 & xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tmp40 * tmp41 tmp43 = tl.load(in_ptr0 + (-64 + x4), tmp39 & xmask, other=0.0) tmp44 = tl.load(in_ptr1 + (4 + x0), tmp39 & xmask, eviction_policy= 'evict_last', other=0.0) tmp45 = tmp43 * tmp44 tmp46 = tmp42 + tmp45 tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp39, tmp46, tmp47) tmp49 = tl.load(in_ptr0 + x4, tmp4 & xmask, other=0.0) tmp50 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp51 = tmp49 * tmp50 tmp52 = tl.where(tmp4, tmp48, tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp4, tmp52, tmp53) tmp55 = tl.load(in_ptr0 + (-64 + x4), tmp4 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + (4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tmp55 * tmp56 tmp58 = tmp51 + tmp57 tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype) tmp60 = tl.where(tmp4, tmp58, tmp59) tmp63 = tmp61 * tmp62 
tmp64 = tl.where(tmp4, tmp60, tmp63) tmp65 = tl.where(tmp4, tmp54, tmp64) tmp66 = tl.where(tmp2, tmp38, tmp65) tmp67 = tl.where(tmp2, tmp66, tmp66) tl.store(in_out_ptr0 + x4, tmp67, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (3, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](buf1, primals_2, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf1, primals_2 class SpatialDepthWiseConvolutionNew(Module): """ ## Spatial Depth Wise Convolution This is actually slower """ def __init__(self, d_k: 'int', kernel_size: 'int'=3): """ * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size rng = 1 / math.sqrt(kernel_size) self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)). uniform_(-rng, rng)) def forward(self, input_0): primals_1 = self.kernels primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SpatialDepthWiseConvolution
false
16,572
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
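The fused kernel in the SpatialDepthWiseConvolution row above unrolls the short Python loop of the eager module into nested tl.where selections for kernel_size=3. The same shifted accumulation, written out in plain PyTorch with illustrative sizes, looks like this sketch:

import math
import torch

seq_len, batch_size, heads, d_k = 4, 2, 2, 4
kernel_size = 3
x = torch.rand(seq_len, batch_size, heads, d_k)

rng = 1 / math.sqrt(kernel_size)
kernels = torch.zeros(kernel_size, d_k).uniform_(-rng, rng)

res = x * kernels[0].view(1, 1, 1, -1)
for i in range(1, kernel_size):
    # add the contribution of the token i steps back; the first i positions have no such neighbour
    res[i:] += x[:-i] * kernels[i].view(1, 1, 1, -1)
print(res.shape)  # torch.Size([4, 2, 2, 4])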
PatchEmbeddings
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class PatchEmbeddings(Module):
    """
    <a id="PatchEmbeddings"></a>

    ## Get patch embeddings

    The paper splits the image into patches of equal size and does a linear transformation
    on the flattened pixels for each patch.

    We implement the same thing through a convolution layer, because it's simpler to implement.
    """

    def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'):
        """
        * `d_model` is the transformer embeddings size
        * `patch_size` is the size of the patch
        * `in_channels` is the number of channels in the input image (3 for rgb)
        """
        super().__init__()
        self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride=
            patch_size)

    def forward(self, x: 'torch.Tensor'):
        """
        * `x` is the input image of shape `[batch_size, channels, height, width]`
        """
        x = self.conv(x)
        bs, c, h, w = x.shape
        x = x.permute(2, 3, 0, 1)
        x = x.view(h * w, bs, c)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'patch_size': 4, 'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (1, 4, 4), (1, 4, 1), 0 ), primals_1, primals_3 class PatchEmbeddingsNew(Module): """ <a id="PatchEmbeddings"></a> ## Get patch embeddings The paper splits the image into patches of equal size and do a linear transformation on the flattened pixels for each patch. We implement the same thing through a convolution layer, because it's simpler to implement. """ def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'): """ * `d_model` is the transformer embeddings size * `patch_size` is the size of the patch * `in_channels` is the number of channels in the input image (3 for rgb) """ super().__init__() self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride= patch_size) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
PatchEmbeddings
false
16,573
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
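As a usage-style sketch of the PatchEmbeddings row above: a convolution whose kernel size and stride both equal the patch size produces one d_model vector per patch, and a permute plus reshape turns the result into the [patches, batch_size, d_model] layout the module returns. The image and model sizes here are illustrative assumptions, not the 4x4 toy shapes from get_inputs.

import torch
from torch import nn

batch_size, in_channels, height, width = 2, 3, 32, 32
d_model, patch_size = 16, 8

conv = nn.Conv2d(in_channels, d_model, kernel_size=patch_size, stride=patch_size)
image = torch.rand(batch_size, in_channels, height, width)

x = conv(image)                               # [batch, d_model, height/patch, width/patch]
bs, c, h, w = x.shape
patches = x.permute(2, 3, 0, 1).reshape(h * w, bs, c)
print(patches.shape)                          # torch.Size([16, 2, 16])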
ToRGB
import math import torch import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.nn.functional import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) ---*$A$ denotes a linear layer.*--- Generates an RGB image from a feature map using $1 imes 1$ convolution. """ def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(3)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_latent': 4, 'features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_1[grid(48)](primals_5, buf1, buf2, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 12, 4, 4), (192, 16, 4, 1)) buf4 = reinterpret_tensor(buf3, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_2[grid(192)](buf4, primals_6, buf5, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf4, primals_3, primals_5, buf1, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf5 class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class ToRGBNew(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) ---*$A$ denotes a linear layer.*--- Generates an RGB image from a feature map using $1 imes 1$ convolution. """ def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(3)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, input_0, input_1): primals_6 = self.bias primals_2 = self.to_style.bias primals_1 = self.to_style.weight.weight primals_5 = self.conv.weight.weight primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
ToRGB
false
16,574
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
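The ToRGB row above combines an equalized linear layer (the style) with a modulated but non-demodulated 1x1 convolution. A sketch of just that modulated convolution, using the usual grouped-convolution trick so every sample gets its own style-scaled kernel; the shapes are illustrative assumptions.

import torch
import torch.nn.functional as F

batch_size, in_features, h, w = 4, 8, 16, 16
out_features = 3                                         # RGB
x = torch.rand(batch_size, in_features, h, w)
s = torch.rand(batch_size, in_features)                  # per-sample style scales (output of the A linear layer)
weight = torch.randn(out_features, in_features, 1, 1)

# scale the shared 1x1 kernel by each sample's style, then run one grouped convolution
weights = weight[None] * s[:, None, :, None, None]       # [batch, out, in, 1, 1]
x = x.reshape(1, batch_size * in_features, h, w)
weights = weights.reshape(batch_size * out_features, in_features, 1, 1)
rgb = F.conv2d(x, weights, groups=batch_size).reshape(batch_size, out_features, h, w)
print(rgb.shape)                                         # torch.Size([4, 3, 16, 16])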
DiceLoss
import torch import torch.nn as nn import torch.hub def dice_loss(input, target): smooth = 1.0 input = torch.sigmoid(input) if input.dim() == 4: B, C, _H, _W = input.size() iflat = input.view(B * C, -1) tflat = target.view(B * C, -1) else: assert input.dim() == 3 B, _H, _W = input.size() iflat = input.view(B, -1) tflat = target.view(B, -1) intersection = (iflat * tflat).sum(dim=1) loss = 1 - (2.0 * intersection + smooth) / (iflat.sum(dim=1) + tflat. sum(dim=1) + smooth) loss = loss.mean() return loss class DiceLoss(nn.Module): def __init__(self): super().__init__() pass def forward(self, input, target): return dice_loss(input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tmp3 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16,), (1,), torch.float32) buf1 = empty_strided_cuda((16,), (1,), torch.float32) buf2 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(16)](arg0_1, arg1_1, buf0, buf1, buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, def dice_loss(input, target): smooth = 1.0 input = torch.sigmoid(input) if input.dim() == 4: B, C, _H, _W = input.size() iflat = input.view(B * C, -1) tflat = target.view(B * C, -1) else: assert input.dim() == 3 B, _H, _W = input.size() iflat = input.view(B, -1) tflat = target.view(B, -1) intersection = (iflat * tflat).sum(dim=1) loss = 1 - (2.0 * intersection + smooth) / (iflat.sum(dim=1) + tflat. 
sum(dim=1) + smooth) loss = loss.mean() return loss class DiceLossNew(nn.Module): def __init__(self): super().__init__() pass def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
thangnx183/kaggle-understanding-clouds
DiceLoss
false
16,575
[ "BSD-2-Clause" ]
207
15ad2a9029958262437b899cb00525579da23911
https://github.com/thangnx183/kaggle-understanding-clouds/tree/15ad2a9029958262437b899cb00525579da23911
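To make the soft-Dice arithmetic in the row above concrete, here is a tiny worked example with one almost-binary prediction. The numbers are illustrative, and the formula is applied directly rather than through dice_loss, which expects 3D or 4D input.

import torch

logits = torch.tensor([[10.0, -10.0, 10.0, -10.0]])   # sigmoid gives roughly [1, 0, 1, 0]
target = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
smooth = 1.0

probs = torch.sigmoid(logits)
intersection = (probs * target).sum(dim=1)             # ~1: one predicted-and-true pixel
dice = (2.0 * intersection + smooth) / (probs.sum(dim=1) + target.sum(dim=1) + smooth)
loss = (1 - dice).mean()
print(round(float(loss), 3))                           # ~0.25, i.e. 1 - (2*1 + 1) / (2 + 1 + 1)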
StyleBlock
import math import torch import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import Optional from typing import List import torch.nn.functional import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlock(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).*--- Style block has a weight modulation convolution layer. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_latent': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = rindex // 9 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 36 * x0), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), rmask & xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 36 * x4), tmp13, rmask & xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, 
num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf3, primals_5, buf1, buf4, 16, 36, XBLOCK=8, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2[grid(256)]( buf6, primals_7, primals_6, primals_8, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 del primals_8 return (buf6, primals_3, primals_5, primals_6, buf1, buf3, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), buf7) class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlockNew(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).*--- Style block has a weight modulation convolution layer. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, input_0, input_1, input_2): primals_7 = self.scale_noise primals_2 = self.bias primals_8 = self.to_style.bias primals_1 = self.to_style.weight.weight primals_5 = self.conv.weight.weight primals_4 = input_0 primals_3 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
StyleBlock
false
16,576
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
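Unlike ToRGB, the style block's Conv2dWeightModulate runs with demodulate=True, which is what the rsqrt-of-summed-squares in the fused kernel above computes. A short sketch of modulation followed by demodulation, with illustrative sizes:

import torch

batch_size, in_features, out_features, kernel_size = 4, 8, 8, 3
eps = 1e-08
weight = torch.randn(out_features, in_features, kernel_size, kernel_size)
s = torch.rand(batch_size, in_features)                 # style scales from the A linear layer

weights = weight[None] * s[:, None, :, None, None]      # modulate: [batch, out, in, k, k]
sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + eps)
weights = weights * sigma_inv                           # demodulate
# every per-sample output filter now has (approximately) unit L2 norm
print(round(float(weights[0, 0].pow(2).sum()), 6))      # ~1.0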
AddTensors
import torch import torch.nn as nn import torch.hub class AddTensors(nn.Module): """ Adds all its inputs together. """ def forward(self, xs): return sum(xs) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + (64 + x0), xmask) tmp5 = tl.load(in_ptr0 + (128 + x0), xmask) tmp7 = tl.load(in_ptr0 + (192 + x0), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class AddTensorsNew(nn.Module): """ Adds all its inputs together. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
theoway/raster-vision
AddTensors
false
16,577
[ "Apache-2.0" ]
1,577
dab675517f904771e2ce8c052494f8a6f1ddc026
https://github.com/theoway/raster-vision/tree/dab675517f904771e2ce8c052494f8a6f1ddc026
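One detail worth noting about the AddTensors row above: when forward receives a single tensor, as get_inputs supplies, Python's built-in sum iterates its first dimension, which is exactly the reduction the fused kernel performs on the [4, 4, 4, 4] input. A small sketch of both behaviours, with illustrative shapes:

import torch

xs = torch.arange(8.0).reshape(4, 2)      # a single tensor, treated as four row "inputs"
print(sum(xs))                            # tensor([12., 16.]) - the rows are added together
print(sum(xs).shape)                      # torch.Size([2])

tensors = [torch.ones(2, 2), torch.full((2, 2), 2.0)]
print(sum(tensors))                       # tensor([[3., 3.], [3., 3.]]) - elementwise sum of a list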
ACGANDiscriminator
import torch import torch.nn as nn import torch.nn.utils as utils import torch.nn.functional as F from torchvision import utils def global_pooling(input, pooling='mean'): if pooling == 'mean': return input.mean(3).mean(2) elif pooling == 'sum': return input.sum(3).sum(2) else: raise NotImplementedError() class CustomConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None, bias=True, spectral_norm=False, residual_init=True): super(CustomConv2d, self).__init__() self.residual_init = residual_init if padding is None: padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) if spectral_norm: self.conv = utils.spectral_norm(self.conv) def forward(self, input): return self.conv(input) class CustomLinear(nn.Module): def __init__(self, in_features, out_features, bias=True, spectral_norm= False): super(CustomLinear, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) if spectral_norm: self.linear = utils.spectral_norm(self.linear) def forward(self, input): return self.linear(input) class ConvMeanPool(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(ConvMeanPool, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = self.conv(output) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(MeanPoolConv, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, resample= None, spectral_norm=False): super(ResidualBlock, self).__init__() if in_channels != out_channels or resample is not None: self.learnable_shortcut = True else: self.learnable_shortcut = False self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() if resample == 'down': self.conv_shortcut = ConvMeanPool(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False ) self.conv1 = CustomConv2d(in_channels, in_channels, kernel_size =kernel_size, spectral_norm=spectral_norm) self.conv2 = ConvMeanPool(in_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) elif resample is None: if self.learnable_shortcut: self.conv_shortcut = CustomConv2d(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) self.conv2 = CustomConv2d(out_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) else: raise NotImplementedError() def forward(self, input): if self.learnable_shortcut: shortcut = self.conv_shortcut(input) else: shortcut = input output = input output = self.relu1(output) output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return 
shortcut + output class OptimizedResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, spectral_norm=False): super(OptimizedResidualBlock, self).__init__() self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv_shortcut = MeanPoolConv(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.relu2 = nn.ReLU() def forward(self, input): shortcut = self.conv_shortcut(input) output = input output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return shortcut + output class ACGANDiscriminator(nn.Module): def __init__(self, num_classes=10, channels=128, dropout=False, spectral_norm=False, pooling='mean'): super(ACGANDiscriminator, self).__init__() self.num_classes = num_classes self.channels = channels self.dropout = dropout self.spectral_norm = spectral_norm self.pooling = pooling self.block1 = OptimizedResidualBlock(3, channels, 3, spectral_norm= spectral_norm) self.block2 = ResidualBlock(channels, channels, 3, resample='down', spectral_norm=spectral_norm) self.block3 = ResidualBlock(channels, channels, 3, resample=None, spectral_norm=spectral_norm) self.block4 = ResidualBlock(channels, channels, 3, resample=None, spectral_norm=spectral_norm) self.relu5 = nn.ReLU() self.linear5dis = CustomLinear(channels, 1, spectral_norm=spectral_norm ) self.linear5cls = CustomLinear(channels, num_classes) def forward(self, input, dropout=None): if dropout is None: dropout = self.dropout output = input output = self.block1(output) output = self.block2(output) output = F.dropout(output, p=0.2, training=dropout) output = self.block3(output) output = F.dropout(output, p=0.5, training=dropout) output = self.block4(output) output = F.dropout(output, p=0.5, training=dropout) output = self.relu5(output) out_feat = global_pooling(output, 'mean') output = global_pooling(output, self.pooling) out_dis = self.linear5dis(output) out_cls = self.linear5cls(out_feat) return out_dis.squeeze(), out_cls.squeeze(), out_feat def get_inputs(): return [torch.rand([4, 3, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.utils as utils from torchvision import utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_add_div_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 2 x2 = xindex // 6 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 24 * x2), xmask) tmp1 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 24 * x2), xmask) tmp3 = tl.load(in_ptr0 + (3 + x0 + 6 * x1 + 24 * x2), xmask) tmp5 = tl.load(in_ptr0 + (15 + x0 + 6 * x1 + 24 * x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_div_relu_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x1 = xindex // 128 % 2 x2 = xindex // 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 256 * x1 + 1024 * x2), None) tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (512 + x0 + 256 * x1 + 1024 * x2), None) tmp9 = tl.load(in_ptr1 + (128 + x0 + 256 * x1 + 1024 * x2), None) tmp12 = tl.load(in_ptr1 + (640 + x0 + 256 * x1 + 1024 * x2), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tl.store(in_out_ptr0 + x3, tmp17, None) tl.store(out_ptr0 + x3, tmp19, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_div_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * 
x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (256 + x0 + 512 * x1), xmask) tmp6 = tl.load(in_ptr0 + (128 + x0 + 512 * x1), xmask) tmp9 = tl.load(in_ptr0 + (384 + x0 + 512 * x1), xmask) tmp14 = tl.load(in_ptr2 + (x0 + 512 * x1), xmask) tmp15 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (256 + x0 + 512 * x1), xmask) tmp20 = tl.load(in_ptr2 + (128 + x0 + 512 * x1), xmask) tmp23 = tl.load(in_ptr2 + (384 + x0 + 512 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tmp16 = tmp14 + tmp15 tmp18 = tmp17 + tmp15 tmp19 = tmp16 + tmp18 tmp21 = tmp20 + tmp15 tmp22 = tmp19 + tmp21 tmp24 = tmp23 + tmp15 tmp25 = tmp22 + tmp24 tmp26 = tmp25 * tmp12 tmp27 = tmp13 + tmp26 tmp28 = tl.full([1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr0 + x2, tmp27, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_convolution_mean_relu_threshold_backward_10( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = 1.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 / tmp11 tmp14 = 0.0 tmp15 = tmp10 <= tmp14 tl.store(out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) tl.store(out_ptr2 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (128, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_3, (128,), (1,)) 
assert_size_stride(primals_4, (128, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (1, 128), (128, 1)) assert_size_stride(primals_23, (1,), (1,)) assert_size_stride(primals_24, (10, 128), (128, 1)) assert_size_stride(primals_25, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 1, 12, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 16)](primals_1, buf0, 12, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((128, 3, 3, 3), (27, 1, 9, 3), torch.float32) triton_poi_fused_1[grid(384, 9)](primals_4, buf1, 384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_6, buf2, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_10, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf7 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_18, buf7, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf8 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_20, buf8, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf9 = empty_strided_cuda((4, 3, 2, 2), (12, 1, 6, 3), torch.float32) triton_poi_fused_add_div_3[grid(48)](buf0, buf9, 48, XBLOCK=64, num_warps=1, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 2, 2), (512, 1, 256, 128)) buf11 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 128, 4, 4), (2048, 1, 512, 128)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_4[grid(8192)](buf12, primals_5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf13 = extern_kernels.convolution(buf12, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 4, 4), (2048, 1, 512, 128)) buf14 = buf10 del buf10 buf16 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_add_convolution_div_relu_5[grid(2048)](buf14, primals_3, buf13, primals_7, buf16, 2048, XBLOCK=256, num_warps =4, num_stages=1) del buf13 del primals_3 del primals_7 buf15 = extern_kernels.convolution(buf14, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 2, 2), (512, 1, 256, 128)) buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 2, 2), (512, 1, 256, 128)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_6[grid(2048)](buf18, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf19 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 2, 2), (512, 1, 256, 128)) buf20 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf21 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) triton_poi_fused_add_div_relu_7[grid(512)](buf15, primals_9, buf19, primals_13, buf20, buf21, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf15 del buf19 del primals_13 del primals_9 buf22 = extern_kernels.convolution(buf21, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 1, 1), (128, 1, 128, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_8[grid(512)](buf23, primals_15, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 1, 1), (128, 1, 128, 128)) buf25 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) triton_poi_fused_add_convolution_relu_9[grid(512)](buf20, buf24, primals_17, buf25, 512, XBLOCK=256, num_warps=4, num_stages=1) buf26 = extern_kernels.convolution(buf25, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 1, 1), (128, 1, 128, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_8[grid(512)](buf27, primals_19, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf28 = extern_kernels.convolution(buf27, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 1, 1), (128, 1, 128, 128)) buf29 = buf20 del buf20 buf30 = empty_strided_cuda((4, 128), (128, 1), torch.float32) buf31 = empty_strided_cuda((4, 128), (128, 1), torch.float32) buf35 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), 
torch.bool) triton_poi_fused_add_convolution_mean_relu_threshold_backward_10[grid (512)](buf29, buf24, primals_17, buf28, primals_21, buf30, buf31, buf35, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf24 del buf28 del buf29 del primals_17 del primals_21 buf33 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_23, buf31, reinterpret_tensor( primals_22, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf33) del primals_23 buf34 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_25, buf30, reinterpret_tensor( primals_24, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf34) del primals_25 return (reinterpret_tensor(buf33, (4,), (1,), 0), buf34, buf30, buf0, primals_2, buf1, buf2, primals_8, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf12, buf14, buf16, buf18, buf21, buf23, buf25, buf27, buf30, buf31, primals_24, primals_22, buf35) def global_pooling(input, pooling='mean'): if pooling == 'mean': return input.mean(3).mean(2) elif pooling == 'sum': return input.sum(3).sum(2) else: raise NotImplementedError() class CustomConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None, bias=True, spectral_norm=False, residual_init=True): super(CustomConv2d, self).__init__() self.residual_init = residual_init if padding is None: padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) if spectral_norm: self.conv = utils.spectral_norm(self.conv) def forward(self, input): return self.conv(input) class CustomLinear(nn.Module): def __init__(self, in_features, out_features, bias=True, spectral_norm= False): super(CustomLinear, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) if spectral_norm: self.linear = utils.spectral_norm(self.linear) def forward(self, input): return self.linear(input) class ConvMeanPool(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(ConvMeanPool, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = self.conv(output) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(MeanPoolConv, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, resample= None, spectral_norm=False): super(ResidualBlock, self).__init__() if in_channels != out_channels or resample is not None: self.learnable_shortcut = True else: self.learnable_shortcut = False self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() if resample == 'down': self.conv_shortcut = ConvMeanPool(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False ) self.conv1 = CustomConv2d(in_channels, in_channels, kernel_size =kernel_size, spectral_norm=spectral_norm) 
self.conv2 = ConvMeanPool(in_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) elif resample is None: if self.learnable_shortcut: self.conv_shortcut = CustomConv2d(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) self.conv2 = CustomConv2d(out_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) else: raise NotImplementedError() def forward(self, input): if self.learnable_shortcut: shortcut = self.conv_shortcut(input) else: shortcut = input output = input output = self.relu1(output) output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return shortcut + output class OptimizedResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, spectral_norm=False): super(OptimizedResidualBlock, self).__init__() self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv_shortcut = MeanPoolConv(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.relu2 = nn.ReLU() def forward(self, input): shortcut = self.conv_shortcut(input) output = input output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return shortcut + output class ACGANDiscriminatorNew(nn.Module): def __init__(self, num_classes=10, channels=128, dropout=False, spectral_norm=False, pooling='mean'): super(ACGANDiscriminatorNew, self).__init__() self.num_classes = num_classes self.channels = channels self.dropout = dropout self.spectral_norm = spectral_norm self.pooling = pooling self.block1 = OptimizedResidualBlock(3, channels, 3, spectral_norm= spectral_norm) self.block2 = ResidualBlock(channels, channels, 3, resample='down', spectral_norm=spectral_norm) self.block3 = ResidualBlock(channels, channels, 3, resample=None, spectral_norm=spectral_norm) self.block4 = ResidualBlock(channels, channels, 3, resample=None, spectral_norm=spectral_norm) self.relu5 = nn.ReLU() self.linear5dis = CustomLinear(channels, 1, spectral_norm=spectral_norm ) self.linear5cls = CustomLinear(channels, num_classes) def forward(self, input_0): primals_4 = self.block1.conv1.conv.weight primals_3 = self.block1.conv1.conv.bias primals_6 = self.block1.conv2.conv.conv.weight primals_5 = self.block1.conv2.conv.conv.bias primals_2 = self.block1.conv_shortcut.conv.conv.weight primals_7 = self.block1.conv_shortcut.conv.conv.bias primals_8 = self.block2.conv_shortcut.conv.conv.weight primals_9 = self.block2.conv_shortcut.conv.conv.bias primals_10 = self.block2.conv1.conv.weight primals_11 = self.block2.conv1.conv.bias primals_12 = self.block2.conv2.conv.conv.weight primals_13 = self.block2.conv2.conv.conv.bias primals_14 = self.block3.conv1.conv.weight primals_15 = self.block3.conv1.conv.bias primals_16 = self.block3.conv2.conv.weight primals_17 = self.block3.conv2.conv.bias primals_18 = self.block4.conv1.conv.weight primals_19 = self.block4.conv1.conv.bias primals_20 = self.block4.conv2.conv.weight primals_21 = self.block4.conv2.conv.bias primals_22 = self.linear5dis.linear.weight primals_23 = self.linear5dis.linear.bias primals_24 = self.linear5cls.linear.weight primals_25 = self.linear5cls.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0], output[1], output[2]
takuhirok/rGAN
ACGANDiscriminator
false
16,578
[ "MIT" ]
103
6f7a092de5814c662fd17224b3d48bebe7e03c2f
https://github.com/takuhirok/rGAN/tree/6f7a092de5814c662fd17224b3d48bebe7e03c2f
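A minimal parity-check sketch for the ACGANDiscriminator entry above: it runs the eager module and the Inductor-generated ACGANDiscriminatorNew on the entry's sample input and compares the outputs. It assumes a CUDA device and assumes both class definitions from the entry have been pasted into the same script (they are not packaged separately); the tolerance is an illustrative choice, not part of the dataset.

import torch

# ACGANDiscriminator (eager) and ACGANDiscriminatorNew (Triton path) are
# assumed to be defined earlier in this script, copied from the entry above.
torch.manual_seed(0)
eager = ACGANDiscriminator().cuda().eval()
compiled = ACGANDiscriminatorNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical parameter layout

x = torch.rand(4, 3, 4, 4, device='cuda')     # matches get_inputs()
with torch.no_grad():
    dis_ref, cls_ref, feat_ref = eager(x)
    dis_new, cls_new, feat_new = compiled(x)

# The fused kernels reorder the mean-pooling and ReLU arithmetic, so the
# two paths should agree only up to floating-point tolerance.
print(torch.allclose(dis_ref, dis_new, atol=1e-4),
      torch.allclose(cls_ref, cls_new, atol=1e-4),
      torch.allclose(feat_ref, feat_new, atol=1e-4))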
EqualizedLinear
import math import torch import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.nn.functional import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np from torch import nn import torch.utils.data from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinearNew(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. 
""" def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, input_0): primals_2 = self.bias primals_1 = self.weight.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
EqualizedLinear
false
16,579
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
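A small sketch of the scaling behaviour described in the EqualizedWeight docstring of the entry above, assuming the EqualizedWeight and EqualizedLinear classes from that entry are in scope. The numbers follow directly from get_init_inputs() (in_features = out_features = 4).

import math
import torch

layer = EqualizedLinear(in_features=4, out_features=4)

# The stored parameter is drawn from N(0, 1); the effective weight is the
# stored one multiplied by c = 1 / sqrt(in_features) = 0.5 at call time.
# The same 0.5 constant appears hard-coded in triton_poi_fused_mul_0 above.
assert math.isclose(layer.weight.c, 0.5)

x = torch.rand(4, 4, 4, 4)        # matches get_inputs()
y = layer(x)                      # F.linear applied over the last dimension
print(y.shape)                    # torch.Size([4, 4, 4, 4])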
SymmetricBCELoss
import torch import torch.nn as nn import torch.nn.functional as F import torch.hub class SymmetricBCELoss(nn.Module): def __init__(self, alpha=0.1, beta=0.1): super().__init__() self.alpha = alpha self.beta = beta def forward(self, input, target): y_true = target y_pred = torch.sigmoid(input) y_true = y_true.view(-1, 1) y_pred = y_pred.view(-1, 1) y_true_1 = y_true y_pred_1 = y_pred y_true_2 = y_true y_pred_2 = y_pred y_pred_1 = torch.clamp(y_pred_1, 1e-07, 1.0) y_true_2 = torch.clamp(y_true_2, 0.0001, 1.0) loss_ce = F.kl_div(torch.log(y_pred_1), y_true_1) loss_rce = F.kl_div(torch.log(y_true_2), y_pred_2) return self.alpha * loss_ce + self.beta * loss_rce def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr1 + r0, None) tmp1 = libdevice.isnan(tmp0).to(tl.int1) tmp2 = 0.0 tmp3 = tmp0 == tmp2 tmp4 = tl_math.log(tmp0) tmp5 = tmp0 * tmp4 tmp6 = tl.where(tmp3, tmp2, tmp5) tmp7 = float('nan') tmp8 = tl.where(tmp1, tmp7, tmp6) tmp10 = tl.sigmoid(tmp9) tmp11 = 1e-07 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = tl_math.log(tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp8 - tmp16 tmp18 = tl.broadcast_to(tmp17, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tmp21 = libdevice.isnan(tmp10).to(tl.int1) tmp22 = tmp10 == tmp2 tmp23 = tl_math.log(tmp10) tmp24 = tmp10 * tmp23 tmp25 = tl.where(tmp22, tmp2, tmp24) tmp26 = tl.where(tmp21, tmp7, tmp25) tmp27 = 0.0001 tmp28 = triton_helpers.maximum(tmp0, tmp27) tmp29 = triton_helpers.minimum(tmp28, tmp13) tmp30 = tl_math.log(tmp29) tmp31 = tmp10 * tmp30 tmp32 = tmp26 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 256.0 tmp37 = tmp20 / tmp36 tmp38 = 0.1 tmp39 = tmp37 * tmp38 tmp40 = tmp35 / tmp36 tmp41 = tmp40 * tmp38 tmp42 = tmp39 + tmp41 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp42, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class SymmetricBCELossNew(nn.Module): def __init__(self, alpha=0.1, beta=0.1): super().__init__() self.alpha = alpha self.beta = beta def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
thangnx183/kaggle-understanding-clouds
SymmetricBCELoss
false
16,580
[ "BSD-2-Clause" ]
207
15ad2a9029958262437b899cb00525579da23911
https://github.com/thangnx183/kaggle-understanding-clouds/tree/15ad2a9029958262437b899cb00525579da23911
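A usage sketch for the SymmetricBCELoss entry above (class assumed in scope). The loss adds a forward term, roughly KL(target || sigmoid(input)), and a reverse term, KL(sigmoid(input) || clamped target), each under F.kl_div's default 'mean' reduction; that reduction is what the fused kernel's division by 256.0 (the element count of the sample inputs) corresponds to.

import torch

criterion = SymmetricBCELoss(alpha=0.1, beta=0.1)

logits = torch.randn(4, 4, 4, 4, requires_grad=True)  # raw scores; sigmoid applied inside
targets = torch.rand(4, 4, 4, 4)                       # soft labels in [0, 1]

loss = criterion(logits, targets)  # scalar tensor
loss.backward()                    # both KL terms are differentiable w.r.t. logits
print(loss.item())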
UpSample
import torch from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) class UpSample(nn.Module): """ <a id="up_sample"></a> ### Up-sample The up-sample operation scales the image up by $2 imes$ and [smoothens](#smooth) each feature channel. This is based on the paper [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486). """ def __init__(self): super().__init__() self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.smooth = Smooth() def forward(self, x: 'torch.Tensor'): return self.smooth(self.up_sample(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = x0 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 + tmp2 tmp16 = tmp15 * tmp2 tmp17 = tmp16 - tmp2 tmp18 = triton_helpers.maximum(tmp17, tmp6) tmp19 = tmp18.to(tl.int32) tmp20 = tmp19 + tmp9 tmp21 = triton_helpers.minimum(tmp20, tmp11) tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tmp22 - tmp23 tmp25 = tmp19.to(tl.float32) tmp26 = tmp18 - tmp25 tmp27 = triton_helpers.maximum(tmp26, tmp6) tmp28 = 1.0 tmp29 = triton_helpers.minimum(tmp27, tmp28) tmp30 = tmp24 * tmp29 tmp31 = tmp23 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp8.to(tl.float32) tmp39 = tmp7 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp6) tmp41 = triton_helpers.minimum(tmp40, tmp28) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tl.store(in_out_ptr0 + x4, tmp43, xmask) @triton.jit def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x2 = xindex // 100 x3 = xindex tmp0 = tl.load(in_ptr0 + (8 * (7 * (7 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 7)) + 64 * x2 + ( 7 * (7 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 7))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) 
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (1024)](buf2, arg0_1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((16, 1, 10, 10), (100, 100, 10, 1), torch .float32) triton_poi_fused_replication_pad2d_1[grid(1600)](buf2, buf3, 1600, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = extern_kernels.convolution(buf3, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (16, 1, 8, 8), (64, 64, 8, 1)) del arg1_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 8, 8), (256, 64, 8, 1), 0), class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) class UpSampleNew(nn.Module): """ <a id="up_sample"></a> ### Up-sample The up-sample operation scales the image up by $2 imes$ and [smoothens](#smooth) each feature channel. This is based on the paper [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486). """ def __init__(self): super().__init__() self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.smooth = Smooth() def forward(self, input_0): arg1_1 = self.smooth.kernel arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
UpSample
false
16,581
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
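A shape check for the UpSample entry above (the Smooth and UpSample classes from that entry assumed in scope): bilinear 2x upsampling followed by the fixed 3x3 blur keeps the channel count and doubles the spatial size, which is what the two Triton kernels plus the extern convolution reproduce.

import torch

up = UpSample()
x = torch.rand(4, 4, 4, 4)     # matches get_inputs()
y = up(x)

# Upsample doubles H and W to 8x8; Smooth pads by 1 and applies a 3x3
# binomial kernel, so the spatial size stays 8x8 afterwards.
print(y.shape)                 # torch.Size([4, 4, 8, 8])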
BertAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertSelfAttention(nn.Module): """ self attention层 原理可看这篇博客: http://jalammar.github.io/illustrated-transformer/ """ def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): """ 实现 self attention + Add & Norm """ def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5, layer_norm_eps=1)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x4, 
xmask) tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_3, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_3, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_12 return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9 class BertSelfAttention(nn.Module): """ self attention层 原理可看这篇博客: http://jalammar.github.io/illustrated-transformer/ """ def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. 
layer_norm_eps) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): """ 实现 self attention + Add & Norm """ def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
techthiyanes/nlp-notebook
BertAttention
false
16,582
[ "MIT" ]
136
0e5f4b75e635128d4056c89a6c65bea60c15e836
https://github.com/techthiyanes/nlp-notebook/tree/0e5f4b75e635128d4056c89a6c65bea60c15e836
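A usage sketch for the BertAttention entry above (classes assumed in scope). A plain SimpleNamespace stands in for the entry's _mock_config helper; the mask is additive, so real padding masks would carry large negative values rather than 0/1 flags. The dropout probabilities and layer-norm epsilon here are illustrative choices, not the entry's test values.

from types import SimpleNamespace

import torch

config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                         attention_probs_dropout_prob=0.0,
                         hidden_dropout_prob=0.0, layer_norm_eps=1e-12)
attn = BertAttention(config).eval()

hidden = torch.rand(4, 4, 4)        # [batch, seq_len, hidden_size]
mask = torch.zeros(4, 1, 1, 4)      # additive mask: 0 keeps a position, -1e4 would drop it
out = attn(hidden, mask)            # self-attention followed by Add & Norm
print(out.shape)                    # torch.Size([4, 4, 4])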
moving_avg
import torch import torch.nn as nn class moving_avg(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avg, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, x): front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) x = torch.cat([front, x, end], dim=1) x = self.avg(x.permute(0, 2, 1)) x = x.permute(0, 2, 1) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4, 'stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-1 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 24 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 24 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 24 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 24 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(96)](arg0_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 3), (12, 1, 48, 4), torch.float32) triton_poi_fused_avg_pool2d_1[grid(48)](buf0, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 3, 4), (12, 4, 1), 0), class moving_avgNew(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avgNew, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
thuml/Autoformer
moving_avg
false
16,583
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
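The Triton version above fuses the edge-replication cat with the average pool. A minimal eager sketch of the same computation (assuming only stock PyTorch, nothing from the repository) shows why a length-4 time axis with kernel_size=4 shrinks to length 3:

import torch
import torch.nn as nn

x = torch.arange(12, dtype=torch.float32).view(1, 4, 3)    # [batch, time, channels]
pad = (4 - 1) // 2                                          # one replicated step at each end
front = x[:, 0:1, :].repeat(1, pad, 1)
end = x[:, -1:, :].repeat(1, pad, 1)
padded = torch.cat([front, x, end], dim=1)                  # time grows from 4 to 6
avg = nn.AvgPool1d(kernel_size=4, stride=1)(padded.permute(0, 2, 1)).permute(0, 2, 1)
print(avg.shape)                                            # torch.Size([1, 3, 3])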
GeneratorBlock
import math import torch import numpy as np from torch import nn from typing import Tuple import torch.nn.functional as F import torch.utils.data from typing import Optional from typing import List import torch.nn.functional import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlock(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).*--- Style block has a weight modulation convolution layer. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) ---*$A$ denotes a linear layer.*--- Generates an RGB image from a feature map using $1 imes 1$ convolution. 
""" def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(3)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) class GeneratorBlock(nn.Module): """ <a id="generator_block"></a> ### Generator Block ![Generator block](generator_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is a single channel). [`toRGB`](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.*--- The generator block consists of two [style blocks](#style_block) ($3 imes 3$ convolutions with style modulation) and an RGB output. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.style_block1 = StyleBlock(d_latent, in_features, out_features) self.style_block2 = StyleBlock(d_latent, out_features, out_features) self.to_rgb = ToRGB(d_latent, out_features) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tuple of two noise tensors of shape `[batch_size, 1, height, width]` """ x = self.style_block1(x, w, noise[0]) x = self.style_block2(x, w, noise[1]) rgb = self.to_rgb(x, w) return x, rgb def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_latent': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import Optional from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = rindex // 9 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 36 * x0), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), rmask & xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 36 * x4), tmp13, rmask & xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, 
tmp7, tmp11) tl.store(in_out_ptr0 + x4, tmp12, xmask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_12, (1,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_17, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf3, primals_6, buf1, buf4, 16, 36, XBLOCK=8, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_0[grid(16)](primals_9, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_10, primals_4, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf6 buf9 = reinterpret_tensor(buf8, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf8 buf10 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf9, primals_11, buf7, buf10, 16, 36, XBLOCK=8, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2[grid(256)]( buf11, primals_7, primals_1, primals_8, buf20, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 del primals_8 buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 16, 4, 4), (0, 16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation= (1, 1), transposed=False, output_padding=(0, 0), groups=4, bias =None) assert_size_stride(buf12, (1, 16, 4, 4), (256, 16, 4, 1)) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf12 triton_poi_fused_add_leaky_relu_mul_3[grid(256)](buf13, primals_12, primals_1, primals_13, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_12 del primals_13 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_0[grid(16)](primals_14, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, primals_4, reinterpret_tensor( buf14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del buf14 del primals_15 buf16 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch .float32) triton_poi_fused_mul_4[grid(48)](primals_16, buf15, buf16, 48, XBLOCK=64, num_warps=1, num_stages=1) buf17 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=( 1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None ) assert_size_stride(buf17, (1, 12, 4, 4), (192, 16, 4, 1)) buf18 = reinterpret_tensor(buf17, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf17 buf19 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_5[grid(192)](buf18, primals_17, buf19, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 return (buf13, buf18, primals_4, primals_6, primals_11, primals_16, reinterpret_tensor(primals_1, (4,), (1,), 0), buf1, buf3, reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(primals_1, (4,), (1,), 4), buf7, buf9, reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(buf11, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf19, buf20) class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. 
$$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. """ def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlock(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).*--- Style block has a weight modulation convolution layer. 
""" def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) ---*$A$ denotes a linear layer.*--- Generates an RGB image from a feature map using $1 imes 1$ convolution. """ def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(3)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) class GeneratorBlockNew(nn.Module): """ <a id="generator_block"></a> ### Generator Block ![Generator block](generator_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is a single channel). [`toRGB`](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.*--- The generator block consists of two [style blocks](#style_block) ($3 imes 3$ convolutions with style modulation) and an RGB output. 
""" def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.style_block1 = StyleBlock(d_latent, in_features, out_features) self.style_block2 = StyleBlock(d_latent, out_features, out_features) self.to_rgb = ToRGB(d_latent, out_features) def forward(self, input_0, input_1, input_2): primals_7 = self.style_block1.scale_noise primals_3 = self.style_block1.bias primals_8 = self.style_block1.to_style.bias primals_1 = self.style_block1.to_style.weight.weight primals_6 = self.style_block1.conv.weight.weight primals_12 = self.style_block2.scale_noise primals_10 = self.style_block2.bias primals_13 = self.style_block2.to_style.bias primals_2 = self.style_block2.to_style.weight.weight primals_11 = self.style_block2.conv.weight.weight primals_17 = self.to_rgb.bias primals_15 = self.to_rgb.to_style.bias primals_4 = self.to_rgb.to_style.weight.weight primals_16 = self.to_rgb.conv.weight.weight primals_5 = input_0 primals_9 = input_1 primals_14 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
techthiyanes/annotated_deep_learning_paper_implementations
GeneratorBlock
false
16,584
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
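The core of the record above is the modulate/demodulate step plus the grouped-convolution trick that Conv2dWeightModulate uses and that the compiled call reproduces with groups=b. A self-contained sketch with toy sizes (not the repository's exact code path):

import torch
import torch.nn.functional as F

b, out_f, in_f, k, h, w_ = 4, 4, 4, 3, 4, 4
x = torch.rand(b, in_f, h, w_)
weight = torch.randn(out_f, in_f, k, k)
s = torch.rand(b, in_f)                                     # per-sample style scales

# Modulate the shared kernel per sample, then demodulate by the
# per-(sample, output-channel) norm.
w = weight[None] * s[:, None, :, None, None]                # [b, out_f, in_f, k, k]
w = w * torch.rsqrt((w ** 2).sum(dim=(2, 3, 4), keepdim=True) + 1e-8)

# Fold the batch into the channel dimension so one conv2d call applies a
# different kernel to every sample (groups=b).
out = F.conv2d(x.reshape(1, -1, h, w_), w.reshape(b * out_f, in_f, k, k),
               padding=1, groups=b).reshape(b, out_f, h, w_)
print(out.shape)                                            # torch.Size([4, 4, 4, 4])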
BiasAdd
from _paritybench_helpers import _mock_config import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BiasAdd(nn.Module): def __init__(self, channels, opts, act='linear', alpha=None, gain=None, lrmul=1): """ BiasAdd """ super(BiasAdd, self).__init__() self.opts = opts self.bias = torch.nn.Parameter(torch.zeros(channels, 1, 1) * lrmul) self.act = act self.alpha = alpha if alpha is not None else 0.2 self.gain = gain if gain is not None else 1.0 def forward(self, x): x += self.bias if self.act == 'linear': pass elif self.act == 'lrelu': x = F.leaky_relu(x, self.alpha, inplace=True) x = x * np.sqrt(2) if self.gain != 1: x = x * self.gain return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'opts': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, buf0 class BiasAddNew(nn.Module): def __init__(self, channels, opts, act='linear', alpha=None, gain=None, lrmul=1): """ BiasAdd """ super(BiasAddNew, self).__init__() self.opts = opts self.bias = torch.nn.Parameter(torch.zeros(channels, 1, 1) * lrmul) self.act = act self.alpha = alpha if alpha is not None else 0.2 self.gain = gain if gain is not None else 1.0 def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
tomguluson92/StyleGAN2_PyTorch
BiasAdd
false
16,585
[ "MIT" ]
89
4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
https://github.com/tomguluson92/StyleGAN2_PyTorch/tree/4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
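The fused kernel above covers the act='linear', gain=1 path, which reduces to a per-channel bias broadcast over an NCHW tensor. A minimal sketch assuming nothing beyond stock PyTorch:

import torch

x = torch.rand(4, 4, 4, 4)                   # NCHW
bias = torch.randn(4, 1, 1)                  # one value per channel
out = x + bias                               # broadcasts over batch, H and W
print(torch.allclose(out[:, 2], x[:, 2] + bias[2, 0, 0]))   # True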
GLU
import torch import torch.nn as nn def initialize_weight(x): nn.init.xavier_uniform_(x.weight) if x.bias is not None: nn.init.constant_(x.bias, 0) class GLU(nn.Module): def __init__(self, in_features, dropout_rate): super(GLU, self).__init__() self.sigm = nn.Sigmoid() self.W = nn.Linear(in_features, out_features=512, bias=True) self.V = nn.Linear(in_features, out_features=512, bias=True) initialize_weight(self.W) initialize_weight(self.V) self.dropout = nn.Dropout(dropout_rate) def forward(self, x): x = self.W(x) * self.sigm(self.V(x)) x = self.dropout(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_ptr1 + x0, None) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 4), (4, 1)) assert_size_stride(primals_5, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(32768)](buf0, buf1, buf2, 32768, XBLOCK=256, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 def initialize_weight(x): nn.init.xavier_uniform_(x.weight) if x.bias is not None: nn.init.constant_(x.bias, 0) class GLUNew(nn.Module): def __init__(self, in_features, dropout_rate): super(GLUNew, self).__init__() self.sigm = nn.Sigmoid() self.W = nn.Linear(in_features, out_features=512, bias=True) self.V = nn.Linear(in_features, out_features=512, bias=True) initialize_weight(self.W) initialize_weight(self.V) self.dropout = nn.Dropout(dropout_rate) def forward(self, input_0): primals_1 = self.W.weight primals_2 = self.W.bias primals_4 = self.V.weight primals_5 = self.V.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
tijsmaas/transformer-pytorch
GLU
false
16,586
[ "MIT" ]
237
bb517979d62c416f68d66325f51826bbbf4ba1bd
https://github.com/tijsmaas/transformer-pytorch/tree/bb517979d62c416f68d66325f51826bbbf4ba1bd
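The single fused kernel above only has to compute the elementwise gate, because both projections are handled by addmm. A minimal sketch of the gating itself (toy batch shape, assuming stock PyTorch):

import torch
import torch.nn as nn

W = nn.Linear(4, 512)
V = nn.Linear(4, 512)
x = torch.rand(16, 4)
out = W(x) * torch.sigmoid(V(x))             # GLU: value projection gated by a sigmoid
print(out.shape)                             # torch.Size([16, 512])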
SquaredErrorBayesRisk
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class SquaredErrorBayesRisk(Module): """ <a id="SquaredErrorBayesRisk"></a> ## Bayes Risk with Squared Error Loss Here the cost function is squared error, $$\\sum_{k=1}^K (y_k - p_k)^2 = \\Vert \\mathbf{y} - \\mathbf{p} \\Vert_2^2$$ We integrate this cost over all $\\mathbf{p}$ egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K (y_k - p_k)^2 \\Big] rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K \\mathbb{E} \\Big[ y_k^2 -2 y_k p_k + p_k^2 \\Big] \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\end{align} Where $$\\mathbb{E}[p_k] = \\hat{p}_k = rac{ extcolor{orange}{lpha_k}}{S}$$ is the expected probability when sampled from the Dirichlet distribution and $$\\mathbb{E}[p_k^2] = \\mathbb{E}[p_k]^2 + ext{Var}(p_k)$$ where $$ ext{Var}(p_k) = rac{ extcolor{orange}{lpha_k}(S - extcolor{orange}{lpha_k})}{S^2 (S + 1)} = rac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1}$$ is the variance. This gives, egin{align} \\mathcal{L}(\\Theta) &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k]^2 + ext{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ig( y_k -\\mathbb{E}[p_k] ig)^2 + ext{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ( y_k -\\hat{p}_k)^2 + rac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1} \\Big) \\end{align} This first part of the equation $ig(y_k -\\mathbb{E}[p_k]ig)^2$ is the error term and the second part is the variance. """ def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'): """ * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]` * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]` """ alpha = evidence + 1.0 strength = alpha.sum(dim=-1) p = alpha / strength[:, None] err = (target - p) ** 2 var = p * (1 - p) / (strength[:, None] + 1) loss = (err + var).sum(dim=-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (4 * x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy ='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy ='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = tmp4 + tmp6 tmp9 = tmp8 + tmp1 tmp10 = tmp7 + tmp9 tmp12 = tmp11 + tmp1 tmp13 = tmp10 + tmp12 tmp14 = tmp2 / tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r3, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (16 * r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (1 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr2 + (2 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (3 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (5 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (6 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (7 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (8 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (9 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr2 + (10 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (11 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp63 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr1 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr2 + (12 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr2 + (13 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (14 + 16 * r0 + 64 * 
r2), None, eviction_policy='evict_last') tmp77 = tl.load(in_ptr2 + (15 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp6 = tmp1 * tmp5 tmp8 = tmp7 + tmp4 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp16 = tmp15 + tmp4 tmp17 = tmp14 + tmp16 tmp18 = tmp17 + tmp4 tmp19 = tmp6 / tmp18 tmp20 = tmp3 + tmp19 tmp23 = tmp21 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp4 - tmp22 tmp26 = tmp22 * tmp25 tmp28 = tmp27 + tmp4 tmp30 = tmp29 + tmp4 tmp31 = tmp28 + tmp30 tmp33 = tmp32 + tmp4 tmp34 = tmp31 + tmp33 tmp36 = tmp35 + tmp4 tmp37 = tmp34 + tmp36 tmp38 = tmp37 + tmp4 tmp39 = tmp26 / tmp38 tmp40 = tmp24 + tmp39 tmp41 = tmp20 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tmp44 * tmp44 tmp46 = tmp4 - tmp43 tmp47 = tmp43 * tmp46 tmp49 = tmp48 + tmp4 tmp51 = tmp50 + tmp4 tmp52 = tmp49 + tmp51 tmp54 = tmp53 + tmp4 tmp55 = tmp52 + tmp54 tmp57 = tmp56 + tmp4 tmp58 = tmp55 + tmp57 tmp59 = tmp58 + tmp4 tmp60 = tmp47 / tmp59 tmp61 = tmp45 + tmp60 tmp62 = tmp41 + tmp61 tmp65 = tmp63 - tmp64 tmp66 = tmp65 * tmp65 tmp67 = tmp4 - tmp64 tmp68 = tmp64 * tmp67 tmp70 = tmp69 + tmp4 tmp72 = tmp71 + tmp4 tmp73 = tmp70 + tmp72 tmp75 = tmp74 + tmp4 tmp76 = tmp73 + tmp75 tmp78 = tmp77 + tmp4 tmp79 = tmp76 + tmp78 tmp80 = tmp79 + tmp4 tmp81 = tmp68 / tmp80 tmp82 = tmp66 + tmp81 tmp83 = tmp62 + tmp82 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = tl.sum(tmp84, 1)[:, None] tmp87 = 64.0 tmp88 = tmp86 / tmp87 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp88, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1[grid(1)](buf3, arg1_1, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf0 return buf3, class SquaredErrorBayesRiskNew(Module): """ <a id="SquaredErrorBayesRisk"></a> ## Bayes Risk with Squared Error Loss Here the cost function is squared error, $$\\sum_{k=1}^K (y_k - p_k)^2 = \\Vert \\mathbf{y} - \\mathbf{p} \\Vert_2^2$$ We integrate this cost over all $\\mathbf{p}$ egin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K (y_k - p_k)^2 \\Big] rac{1}{B( extcolor{orange}{\\mathbf{lpha}})} \\prod_{k=1}^K p_k^{ extcolor{orange}{lpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K \\mathbb{E} \\Big[ y_k^2 -2 y_k p_k + p_k^2 \\Big] \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\end{align} Where $$\\mathbb{E}[p_k] = \\hat{p}_k = rac{ extcolor{orange}{lpha_k}}{S}$$ is the expected probability when sampled from the Dirichlet distribution and $$\\mathbb{E}[p_k^2] = \\mathbb{E}[p_k]^2 + ext{Var}(p_k)$$ where $$ ext{Var}(p_k) = rac{ extcolor{orange}{lpha_k}(S - extcolor{orange}{lpha_k})}{S^2 (S + 1)} = rac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1}$$ is the variance. 
This gives, egin{align} \\mathcal{L}(\\Theta) &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k]^2 + ext{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ig( y_k -\\mathbb{E}[p_k] ig)^2 + ext{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ( y_k -\\hat{p}_k)^2 + rac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1} \\Big) \\end{align} This first part of the equation $ig(y_k -\\mathbb{E}[p_k]ig)^2$ is the error term and the second part is the variance. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SquaredErrorBayesRisk
false
16,587
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
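The loss in this record follows the error-plus-variance decomposition spelled out in the docstring. A minimal sketch for a 2-example batch with K=5 classes (the one-hot targets and shapes are illustrative, not from the repository):

import torch

evidence = torch.rand(2, 5)                        # non-negative evidence, [batch, K]
target = torch.eye(5)[torch.tensor([1, 3])]        # one-hot labels, [batch, K]

alpha = evidence + 1.0
strength = alpha.sum(dim=-1, keepdim=True)         # Dirichlet strength S
p = alpha / strength                               # expected probabilities
err = (target - p) ** 2                            # squared error term
var = p * (1 - p) / (strength + 1)                 # variance term
loss = (err + var).sum(dim=-1).mean()
print(loss)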
series_decomp
import torch import torch.nn as nn class moving_avg(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avg, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, x): front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) x = torch.cat([front, x, end], dim=1) x = self.avg(x.permute(0, 2, 1)) x = x.permute(0, 2, 1) return x class series_decomp(nn.Module): """ Series decomposition block """ def __init__(self, kernel_size): super(series_decomp, self).__init__() self.moving_avg = moving_avg(kernel_size, stride=1) def forward(self, x): moving_mean = self.moving_avg(x) res = x - moving_mean return res, moving_mean def get_inputs(): return [torch.rand([4, 2, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 8 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-1 + x1) + 8 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 4, tl.int64) tmp14 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_poi_fused_avg_pool2d_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_sub_2[grid(32)](arg0_1, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del arg0_1 return buf2, reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0) class moving_avg(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avg, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, x): front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) x = torch.cat([front, x, end], dim=1) x = self.avg(x.permute(0, 2, 1)) x = x.permute(0, 2, 1) return x class series_decompNew(nn.Module): """ Series decomposition block """ def __init__(self, kernel_size): super(series_decompNew, self).__init__() self.moving_avg = moving_avg(kernel_size, stride=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
thuml/Autoformer
series_decomp
false
16,588
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
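series_decomp simply splits the input into a moving-average trend and the residual around it. A self-contained eager sketch with the same toy shape as get_inputs (nothing here comes from the repository):

import torch
import torch.nn as nn

kernel_size = 4
x = torch.rand(4, 2, 4)                                     # [batch, time, channels]
pad = (kernel_size - 1) // 2
padded = torch.cat([x[:, 0:1, :].repeat(1, pad, 1), x, x[:, -1:, :].repeat(1, pad, 1)], dim=1)
trend = nn.AvgPool1d(kernel_size, stride=1)(padded.permute(0, 2, 1)).permute(0, 2, 1)
res = x - trend                                             # residual broadcasts against the trend
print(res.shape, trend.shape)                               # torch.Size([4, 2, 4]) torch.Size([4, 1, 4])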
DilatedNet
import torch from torch.nn import functional as F from torch import nn class DilatedNet(nn.Module): def __init__(self, filters): super().__init__() self.filters = filters self.conv1 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=2, dilation=2) self.conv2 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=4, dilation=4) self.conv3 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=8, dilation=8) self.conv4 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=16, dilation=16) def forward(self, x): fst = F.relu(self.conv1(x)) snd = F.relu(self.conv2(fst)) thrd = F.relu(self.conv3(snd)) fourth = F.relu(self.conv4(thrd)) return x + fst + snd + thrd + fourth def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'filters': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x3, xmask) tmp5 = tl.load(in_ptr3 + x3, xmask) tmp7 = tl.load(in_ptr4 + x3, xmask) tmp8 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tmp6 + tmp11 tmp13 = 0.0 tmp14 = tmp11 <= tmp13 tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(16, 16), dilation=(16, 16), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)]( primals_3, buf1, buf3, buf5, buf6, primals_9, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8) class DilatedNetNew(nn.Module): def __init__(self, filters): super().__init__() self.filters = filters self.conv1 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=2, dilation=2) self.conv2 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=4, dilation=4) self.conv3 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=8, dilation=8) self.conv4 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=16, dilation=16) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
tilacyn/dsb2018_topcoders
DilatedNet
false
16,589
[ "MIT" ]
413
e0f95ef70bc062d4dea321d2aa73231a9538cd63
https://github.com/tilacyn/dsb2018_topcoders/tree/e0f95ef70bc062d4dea321d2aa73231a9538cd63
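DilatedNet can add every intermediate map back onto x only because padding=dilation keeps the spatial size fixed for a 3x3 kernel. A minimal sketch (toy sizes, assuming stock PyTorch) that checks this for each dilation used above:

import torch
import torch.nn as nn

x = torch.rand(1, 4, 16, 16)
for d in (2, 4, 8, 16):
    y = nn.Conv2d(4, 4, 3, padding=d, dilation=d)(x)
    print(d, y.shape)                        # spatial size stays 16x16 for every dilation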
my_Layernorm
import torch import torch.nn as nn class my_Layernorm(nn.Module): """ Specially designed layernorm for the seasonal part """ def __init__(self, channels): super(my_Layernorm, self).__init__() self.layernorm = nn.LayerNorm(channels) def forward(self, x): x_hat = self.layernorm(x) bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1) return x_hat - bias def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_repeat_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_repeat_sub_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf3, primals_3 class my_LayernormNew(nn.Module): """ Special designed layernorm for the seasonal part """ def __init__(self, channels): super(my_LayernormNew, self).__init__() self.layernorm = nn.LayerNorm(channels) def forward(self, input_0): primals_1 = self.layernorm.weight primals_2 = self.layernorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
thuml/Autoformer
my_Layernorm
false
16590
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
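A small usage sketch for the my_Layernorm record above, assuming the my_Layernorm class from the record is in scope; it checks the behaviour the docstring describes (LayerNorm output minus its mean over the sequence dimension).

import torch

norm = my_Layernorm(channels=4)
x = torch.rand(4, 4, 4)                      # [batch, seq_len, channels]
out = norm(x)

# Reference computed directly: LayerNorm, then subtract the mean over dim=1
# so the seasonal component stays zero-centred along time.
x_hat = norm.layernorm(x)
ref = x_hat - x_hat.mean(dim=1, keepdim=True)
assert torch.allclose(out, ref, atol=1e-6)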
Minibatch_stddev_layer
import torch import torch.nn as nn class Minibatch_stddev_layer(nn.Module): """ Minibatch standard deviation layer. (D_stylegan2) """ def __init__(self, group_size=4, num_new_features=1): super().__init__() self.group_size = group_size self.num_new_features = num_new_features def forward(self, x): n, c, h, w = x.shape group_size = min(n, self.group_size) y = x.view(group_size, -1, self.num_new_features, c // self. num_new_features, h, w) y = y - torch.mean(y, dim=0, keepdim=True) y = torch.mean(y ** 2, dim=0) y = torch.sqrt(y + 1e-08) y = torch.mean(y, dim=[2, 3, 4], keepdim=True) y = torch.mean(y, dim=2) y = y.repeat(group_size, 1, h, w) return torch.cat([x, y], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tmp29 = 1.0 tmp30 = tmp28 / tmp29 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp30, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class Minibatch_stddev_layerNew(nn.Module): """ Minibatch standard deviation layer. (D_stylegan2) """ def __init__(self, group_size=4, num_new_features=1): super().__init__() self.group_size = group_size self.num_new_features = num_new_features def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
tomguluson92/StyleGAN2_PyTorch
Minibatch_stddev_layer
false
16591
[ "MIT" ]
89
4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
https://github.com/tomguluson92/StyleGAN2_PyTorch/tree/4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
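A brief usage sketch for the Minibatch_stddev_layer record above (class assumed in scope): the layer appends one statistic channel per num_new_features and leaves the original features untouched.

import torch

layer = Minibatch_stddev_layer(group_size=4, num_new_features=1)
x = torch.rand(4, 4, 4, 4)                   # [N, C, H, W]
out = layer(x)
# The extra channel holds the group-wise feature std, broadcast over H and W.
assert out.shape == (4, 5, 4, 4)
assert torch.equal(out[:, :4], x)            # input features pass through unchanged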
LearnedPositionalEmbeddings
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class LearnedPositionalEmbeddings(Module): """ <a id="LearnedPositionalEmbeddings"></a> ## Add parameterized positional encodings This adds learned positional embeddings to patch embeddings. """ def __init__(self, d_model: 'int', max_len: 'int'=5000): """ * `d_model` is the transformer embeddings size * `max_len` is the maximum number of patches """ super().__init__() self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True) def forward(self, x: 'torch.Tensor'): """ * `x` is the patch embeddings of shape `[patches, batch_size, d_model]` """ pe = self.positional_encodings[x.shape[0]] return x + pe def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LearnedPositionalEmbeddingsNew(Module): """ <a id="LearnedPositionalEmbeddings"></a> ## Add parameterized positional encodings This adds learned positional embeddings to patch embeddings. """ def __init__(self, d_model: 'int', max_len: 'int'=5000): """ * `d_model` is the transformer embeddings size * `max_len` is the maximum number of patches """ super().__init__() self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True) def forward(self, input_0): primals_1 = self.positional_encodings primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
LearnedPositionalEmbeddings
false
16592
[ "MIT" ]
3714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
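A usage sketch for the LearnedPositionalEmbeddings record above (class assumed in scope). Note that the forward indexes a single row, positional_encodings[x.shape[0]], and broadcasts it over the whole input; the compiled kernel's fixed offset of 16 (= 4 * d_model for the traced shapes) reflects exactly that.

import torch

emb = LearnedPositionalEmbeddings(d_model=16, max_len=512)
x = torch.rand(10, 2, 16)                    # [patches, batch_size, d_model]
out = emb(x)
# positional_encodings[10] has shape [1, 16] and broadcasts across every
# patch and batch entry, so the output keeps the input shape.
assert out.shape == x.shape
assert torch.allclose(out, x + emb.positional_encodings[10])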
Aggregator
import torch import torchvision.transforms.functional as F from torch.nn import functional as F from torch import nn class Aggregator(nn.Module): def __init__(self, in_channels, mid_channels, upsample_factor): super().__init__() self.upsample = nn.Upsample(scale_factor=2 ** upsample_factor) self.conv = nn.Conv2d(in_channels, mid_channels, 3, padding=1) def forward(self, x): x = self.upsample(x) x = F.relu(self.conv(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'mid_channels': 4, 'upsample_factor': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x2 = xindex // 4096 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.0625 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(65536)](primals_1, buf0, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(65536)]( buf2, primals_3, buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1 ) del primals_3 return buf2, primals_2, buf0, buf3 class AggregatorNew(nn.Module): def __init__(self, in_channels, mid_channels, upsample_factor): super().__init__() self.upsample = nn.Upsample(scale_factor=2 ** upsample_factor) self.conv = nn.Conv2d(in_channels, mid_channels, 3, padding=1) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
tilacyn/dsb2018_topcoders
Aggregator
false
16593
[ "MIT" ]
413
e0f95ef70bc062d4dea321d2aa73231a9538cd63
https://github.com/tilacyn/dsb2018_topcoders/tree/e0f95ef70bc062d4dea321d2aa73231a9538cd63
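A shape-level usage sketch for the Aggregator record above (class assumed in scope); the channel counts and input size are hypothetical.

import torch

agg = Aggregator(in_channels=4, mid_channels=8, upsample_factor=2)
x = torch.rand(1, 4, 8, 8)
y = agg(x)
# nn.Upsample(scale_factor=2 ** 2) quadruples the spatial size, then the
# 3x3 conv (padding=1) maps 4 -> 8 channels and ReLU clamps at zero.
assert y.shape == (1, 8, 32, 32)
assert (y >= 0).all()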
AdaptiveMaxPool2d
import torch import torch.nn as nn import torch.nn.functional as F class _SpikeAdaptiveMaxPoolNd(nn.Module): def __init__(self, output_size): super(_SpikeAdaptiveMaxPoolNd, self).__init__() self.output_size = output_size self.return_indices = True def reset_state(self): pass class AdaptiveMaxPool2d(_SpikeAdaptiveMaxPoolNd): """Simple port of PyTorch AdaptiveMaxPool2d with small adjustment for spiking operations. Currently pooling only supports operations on floating point numbers, thus it casts the uint8 spikes to floats back and forth. The trace of the 'maximum' spike is also returned. In case of multiple spikes within pooling window, returns first spike of the window (top left corner). """ def forward(self, x, trace): x = x x, idx = F.adaptive_max_pool2d(x, self.output_size, self.return_indices ) trace = trace[idx] return x > 0, trace def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([16, 4, 4, 4])] def get_init_inputs(): return [[], {'output_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_adaptive_max_pool2d_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex // 64 x4 = xindex % 1024 x5 = xindex tl.load(in_ptr0 + x3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last') tl.store(out_ptr0 + x5, tmp1, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (16, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_gt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256, 64, 16, 4, 1), torch.float32) triton_poi_fused_index_1[grid(16384)](arg0_1, arg1_1, buf1, 16384, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1 class _SpikeAdaptiveMaxPoolNd(nn.Module): def __init__(self, output_size): super(_SpikeAdaptiveMaxPoolNd, self).__init__() self.output_size = output_size self.return_indices = True def reset_state(self): pass class AdaptiveMaxPool2dNew(_SpikeAdaptiveMaxPoolNd): """Simple port of PyTorch AdaptiveMaxPool2d with small adjustment for spiking operations. Currently pooling only supports operations on floating point numbers, thus it casts the uint8 spikes to floats back and forth. The trace of the 'maximum' spike is also returned. In case of multiple spikes within pooling window, returns first spike of the window (top left corner). """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
tomking/PySNN
AdaptiveMaxPool2d
false
16594
[ "MIT" ]
175
c99ba6cd28a518dc07cab765acac9b69ac6fe36b
https://github.com/tomking/PySNN/tree/c99ba6cd28a518dc07cab765acac9b69ac6fe36b
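A minimal run of the AdaptiveMaxPool2d record above (class assumed in scope). As in the record's get_inputs, the trace's first dimension is sized to cover the flattened 4x4 spatial indices returned by the pooling op.

import torch

pool = AdaptiveMaxPool2d(output_size=2)
x = (torch.rand(4, 4, 4, 4) > 0.5).float()   # spikes represented as floats
trace = torch.rand(16, 4, 4, 4)              # dim 0 covers flat indices 0..15
spikes, pooled_trace = pool(x, trace)
assert spikes.dtype == torch.bool and spikes.shape == (4, 4, 2, 2)
# trace[idx] advanced-indexes dim 0 of the trace with the pooling indices,
# so the trailing trace dims are kept: (4, 4, 2, 2) + (4, 4, 4).
assert pooled_trace.shape == (4, 4, 2, 2, 4, 4, 4)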
TokenEmbedding
import torch import torch.nn as nn class TokenEmbedding(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbedding, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode='circular', bias=False ) for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, x): x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'c_in': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = yindex // 6 tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK =4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1 class TokenEmbeddingNew(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbeddingNew, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, 
padding_mode='circular', bias=False ) for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, input_0): primals_2 = self.tokenConv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
thuml/Autoformer
TokenEmbedding
false
16595
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
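A usage sketch for the TokenEmbedding record above (class assumed in scope), assuming torch >= 1.5 so the circular Conv1d is built with padding=1; the sizes are hypothetical.

import torch

emb = TokenEmbedding(c_in=7, d_model=64)
x = torch.rand(2, 20, 7)                     # [batch, seq_len, c_in]
out = emb(x)
# permute to [batch, c_in, seq_len], apply the circular 3-tap Conv1d to
# d_model channels, then transpose back to [batch, seq_len, d_model].
assert out.shape == (2, 20, 64)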
ActNorm
import torch import torch.nn as nn class ActNorm(nn.Module): """ ActNorm layer. [Kingma and Dhariwal, 2018.] """ def __init__(self, dim): super().__init__() self.dim = dim self.mu = nn.Parameter(torch.zeros(dim, dtype=torch.float)) self.log_sigma = nn.Parameter(torch.zeros(dim, dtype=torch.float)) def forward(self, x): z = x * torch.exp(self.log_sigma) + self.mu log_det = torch.sum(self.log_sigma) return z, log_det def inverse(self, z): x = (z - self.mu) / torch.exp(self.log_sigma) log_det = -torch.sum(self.log_sigma) return x, log_det def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((), (), torch.float32) triton_per_fused_sum_1[grid(1)](primals_1, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) return buf0, buf1, primals_1, primals_2 class ActNormNew(nn.Module): """ ActNorm layer. [Kingma and Dhariwal, 2018.] """ def __init__(self, dim): super().__init__() self.dim = dim self.mu = nn.Parameter(torch.zeros(dim, dtype=torch.float)) self.log_sigma = nn.Parameter(torch.zeros(dim, dtype=torch.float)) def inverse(self, z): x = (z - self.mu) / torch.exp(self.log_sigma) log_det = -torch.sum(self.log_sigma) return x, log_det def forward(self, input_0): primals_1 = self.mu primals_3 = self.log_sigma primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
tonyduan/hybrid-models
ActNorm
false
16596
[ "MIT" ]
238
a29bff4756d8306cd24515f2fb825763a71c3d90
https://github.com/tonyduan/hybrid-models/tree/a29bff4756d8306cd24515f2fb825763a71c3d90
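A round-trip sketch for the ActNorm record above (class assumed in scope): inverse undoes forward and the two log-determinants cancel.

import torch

act = ActNorm(dim=4)
with torch.no_grad():
    act.log_sigma.uniform_(-0.5, 0.5)        # non-trivial scale so the check is meaningful
    act.mu.normal_()
x = torch.rand(8, 4)
z, log_det = act(x)
x_rec, inv_log_det = act.inverse(z)
assert torch.allclose(x_rec, x, atol=1e-6)
assert torch.allclose(log_det, -inv_log_det)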
GatedMaskedConv2d
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class GatedMaskedConv2d(nn.Module): def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'): super(GatedMaskedConv2d, self).__init__() if out_dim is None: out_dim = in_dim self.dim = out_dim self.size = kernel_size self.mask = mask pad = self.size // 2 self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size)) self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0) self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0) self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1) self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1)) self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0) self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0) self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1) def forward(self, v_map, h_map): v_out = self.v_pad2(self.v_conv(self.v_pad1(v_map)))[:, :, :-1, :] v_map_out = F.tanh(v_out[:, :self.dim]) * F.sigmoid(v_out[:, self.dim:] ) vh = self.vh_conv(v_out) h_out = self.h_conv(self.h_pad1(h_map)) if self.mask == 'A': h_out = self.h_pad2(h_out)[:, :, :, :-1] h_out = h_out + vh h_out = F.tanh(h_out[:, :self.dim]) * F.sigmoid(h_out[:, self.dim:]) h_map_out = self.h_conv_res(h_out) if self.mask == 'B': h_map_out = h_map_out + h_map return v_map_out, h_map_out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 5 x0 = xindex % 6 x2 = xindex // 30 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -1 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x4 = xindex // 20 x5 = xindex % 20 x2 = xindex // 20 % 8 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + x2, tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp2, tmp5, tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1 + 160 * x2), xmask) tmp2 = tl.load(in_ptr0 + (80 + x0 + 20 * x1 + 160 * x2), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x4 = xindex % 64 x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 128 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 128 * x2), xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (64 + x4 + 128 * x2), xmask) tmp9 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (64 + x4 + 128 * x2), xmask) tmp12 = tl.load(in_ptr3 + (4 + x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp7 * tmp15 tl.store(out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr1 + x3, tmp15, xmask) tl.store(out_ptr2 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (8, 4, 1, 2), (8, 2, 2, 1)) assert_size_stride(primals_8, (8,), (1,)) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 5, 4), (160, 20, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(640)](buf1, primals_3, buf2, 640, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_2[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 4, 4), (128, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_3[grid(320)](primals_6, buf5, 320, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 4, 4), (128, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_4[grid(256)](buf6, primals_8, buf4, primals_5, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del buf6 del primals_5 del primals_8 buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_add_convolution_5[grid(256)](buf11, primals_10, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 del primals_6 return (buf3, buf11, primals_2, primals_4, primals_7, primals_9, buf0, reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), buf5, buf7, buf8, buf9) class GatedMaskedConv2dNew(nn.Module): def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'): super(GatedMaskedConv2dNew, self).__init__() if out_dim is None: out_dim = in_dim self.dim = out_dim self.size = kernel_size self.mask = mask pad = self.size // 2 self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size)) self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0) self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0) self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1) self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1)) self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0) self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0) self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1) def forward(self, input_0, input_1): primals_2 = self.v_conv.weight primals_3 = self.v_conv.bias primals_4 = self.vh_conv.weight primals_5 = self.vh_conv.bias primals_7 = self.h_conv.weight primals_8 = self.h_conv.bias primals_9 = self.h_conv_res.weight primals_10 = self.h_conv_res.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1]
tom-pelsmaeker/vae-lagging-encoder
GatedMaskedConv2d
false
16597
[ "MIT" ]
173
b190239019a94c85858d188a0853886eb48ce4be
https://github.com/tom-pelsmaeker/vae-lagging-encoder/tree/b190239019a94c85858d188a0853886eb48ce4be
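A shape-level sketch for the GatedMaskedConv2d record above (class assumed in scope); with the default mask='B' both gated streams keep the input resolution and the horizontal output carries a residual connection to h_map. The sizes below are hypothetical.

import torch

conv = GatedMaskedConv2d(in_dim=8)           # out_dim defaults to in_dim, mask='B'
v_map = torch.rand(2, 8, 16, 16)
h_map = torch.rand(2, 8, 16, 16)
v_out, h_out = conv(v_map, h_map)
assert v_out.shape == (2, 8, 16, 16)
assert h_out.shape == (2, 8, 16, 16)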
MaxPool2d
import torch import torch.nn as nn import torch.nn.functional as F class _SpikeMaxPoolNd(nn.Module): def __init__(self, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False): super(_SpikeMaxPoolNd, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.dilation = dilation self.ceil_mode = ceil_mode self.return_indices = True def reset_state(self): pass class MaxPool2d(_SpikeMaxPoolNd): """Simple port of PyTorch MaxPool2d with small adjustment for spiking operations. Currently pooling only supports operations on floating point numbers, thus it casts the uint8 spikes to floats back and forth. The trace of the 'maximum' spike is also returned. In case of multiple spikes within pooling window, returns first spike of the window (top left corner). """ def forward(self, x, trace): x = x x, idx = F.max_pool2d(x, self.kernel_size, self.stride, self. padding, self.dilation, self.ceil_mode, self.return_indices) trace = trace.view(-1)[idx.view(-1)] trace = trace.view(idx.shape) return x > 0, trace def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gt_index_max_pool2d_with_indices_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 > tmp0 tmp32 = tl.full([1], 1, tl.int8) tmp33 = tl.full([1], 0, tl.int8) tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp3 > tmp2 tmp36 = tl.full([1], 2, tl.int8) tmp37 = tl.where(tmp35, tmp36, tmp34) tmp38 = tmp5 > tmp4 tmp39 = tl.full([1], 3, tl.int8) tmp40 = tl.where(tmp38, tmp39, tmp37) tmp41 = tmp7 > tmp6 tmp42 = tl.full([1], 4, tl.int8) tmp43 = tl.where(tmp41, tmp42, tmp40) tmp44 = tmp9 > tmp8 tmp45 = tl.full([1], 5, tl.int8) tmp46 = tl.where(tmp44, tmp45, tmp43) tmp47 = tmp11 > tmp10 tmp48 = tl.full([1], 6, tl.int8) tmp49 = tl.where(tmp47, tmp48, tmp46) tmp50 = tmp13 > tmp12 tmp51 = tl.full([1], 7, tl.int8) tmp52 = tl.where(tmp50, tmp51, tmp49) tmp53 = tmp15 > tmp14 tmp54 = tl.full([1], 8, tl.int8) tmp55 = tl.where(tmp53, tmp54, tmp52) tmp56 = tmp17 > tmp16 tmp57 = tl.full([1], 9, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp19 > tmp18 tmp60 = tl.full([1], 10, tl.int8) tmp61 = 
tl.where(tmp59, tmp60, tmp58) tmp62 = tmp21 > tmp20 tmp63 = tl.full([1], 11, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp23 > tmp22 tmp66 = tl.full([1], 12, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp25 > tmp24 tmp69 = tl.full([1], 13, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp27 > tmp26 tmp72 = tl.full([1], 14, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp29 > tmp28 tmp75 = tl.full([1], 15, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = 0.0 tmp78 = tmp30 > tmp77 tmp79 = tl.full([1], 4, tl.int32) tmp80 = tl.where((tmp76 < 0) != (tmp79 < 0), tl.where(tmp76 % tmp79 != 0, tmp76 // tmp79 - 1, tmp76 // tmp79), tmp76 // tmp79) tmp81 = tmp80 * tmp79 tmp82 = tmp76 - tmp81 tmp83 = tl.full([1], 0, tl.int64) tmp84 = tmp83 + tmp80 tmp85 = tmp83 + tmp82 tmp86 = tl.full([1], 4, tl.int64) tmp87 = tmp84 * tmp86 tmp88 = tmp87 + tmp85 tmp89 = tl.full([XBLOCK], 256, tl.int32) tmp90 = tmp88 + tmp89 tmp91 = tmp88 < 0 tmp92 = tl.where(tmp91, tmp90, tmp88) tl.device_assert((0 <= tmp92) & (tmp92 < 256) | ~xmask, 'index out of bounds: 0 <= tmp92 < 256') tmp94 = tl.load(in_ptr1 + tmp92, xmask, eviction_policy='evict_last') tl.store(out_ptr2 + x0, tmp78, xmask) tl.store(out_ptr3 + x0, tmp94, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) buf3 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_gt_index_max_pool2d_with_indices_0[grid(16)](arg0_1, arg1_1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf2, reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0) class _SpikeMaxPoolNd(nn.Module): def __init__(self, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False): super(_SpikeMaxPoolNd, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.dilation = dilation self.ceil_mode = ceil_mode self.return_indices = True def reset_state(self): pass class MaxPool2dNew(_SpikeMaxPoolNd): """Simple port of PyTorch MaxPool2d with small adjustment for spiking operations. Currently pooling only supports operations on floating point numbers, thus it casts the uint8 spikes to floats back and forth. The trace of the 'maximum' spike is also returned. In case of multiple spikes within pooling window, returns first spike of the window (top left corner). """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
tomking/PySNN
MaxPool2d
false
16598
[ "MIT" ]
175
c99ba6cd28a518dc07cab765acac9b69ac6fe36b
https://github.com/tomking/PySNN/tree/c99ba6cd28a518dc07cab765acac9b69ac6fe36b
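A minimal run of the MaxPool2d record above (class assumed in scope), with shapes matching the record's get_inputs so the flat-index gather on the trace stays in bounds.

import torch

pool = MaxPool2d(kernel_size=4)
x = (torch.rand(4, 4, 4, 4) > 0.5).float()
trace = torch.rand(4, 4, 4, 4)
spikes, pooled_trace = pool(x, trace)
assert spikes.dtype == torch.bool and spikes.shape == (4, 4, 1, 1)
assert pooled_trace.shape == (4, 4, 1, 1)
# The gather reads trace.view(-1) at the per-plane pooling indices (0..15 here),
# so only indices below trace.numel() are valid.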
DisAlignFastRCNNOutputLayers
import torch import numpy as np import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) class DisAlignFastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int): number of foreground classes cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression box_dim (int): the dimension of bounding boxes. Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(DisAlignFastRCNNOutputLayers, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.logit_scale = nn.Parameter(torch.ones(num_classes)) self.logit_bias = nn.Parameter(torch.zeros(num_classes)) self.confidence_layer = nn.Linear(input_size, 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.confidence_layer.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]: nn.init.constant_(layer.bias, 0) def forward(self, x): if x.dim() > 2: x = torch.flatten(x, start_dim=1) scores = self.cls_score(x) confidence = self.confidence_layer(x).sigmoid() scores_tmp = confidence * (scores[:, :-1] * self.logit_scale + self .logit_bias) scores_tmp = scores_tmp + (1 - confidence) * scores[:, :-1] aligned_scores = cat([scores_tmp, scores[:, -1].view(-1, 1)], dim=1) proposal_deltas = self.bbox_pred(x) return aligned_scores, proposal_deltas def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'num_classes': 4, 'cls_agnostic_bbox_reg': 4} ]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + (5 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr3 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tmp6 * tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp6 tmp15 = tmp14 * tmp7 tmp16 = tmp12 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp22 = tl.load(in_ptr1 + (4 + 5 * x1), tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.where(tmp4, tmp18, tmp22) tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (5,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 5), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor( primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(20)](buf2, buf0, primals_6, primals_7, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, primals_1, reinterpret_tensor( primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 return buf3, buf4, primals_1, primals_6, primals_7, buf0, buf2 def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) class DisAlignFastRCNNOutputLayersNew(nn.Module): """ Two linear layers for predicting Fast 
R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int): number of foreground classes cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression box_dim (int): the dimension of bounding boxes. Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(DisAlignFastRCNNOutputLayersNew, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.logit_scale = nn.Parameter(torch.ones(num_classes)) self.logit_bias = nn.Parameter(torch.zeros(num_classes)) self.confidence_layer = nn.Linear(input_size, 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.confidence_layer.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]: nn.init.constant_(layer.bias, 0) def forward(self, input_0): primals_6 = self.logit_scale primals_7 = self.logit_bias primals_2 = self.cls_score.weight primals_3 = self.cls_score.bias primals_1 = self.bbox_pred.weight primals_9 = self.bbox_pred.bias primals_4 = self.confidence_layer.weight primals_5 = self.confidence_layer.bias primals_8 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
tonysy/cvpods
DisAlignFastRCNNOutputLayers
false
16599
[ "Apache-2.0" ]
548
e322d7842ca0e34b1ef6237ea6d350633efc793a
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
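A shape-level sketch for the DisAlignFastRCNNOutputLayers record above (class assumed in scope); the feature size, class count, and batch size are hypothetical.

import torch

head = DisAlignFastRCNNOutputLayers(input_size=256, num_classes=80,
                                    cls_agnostic_bbox_reg=True)
feats = torch.rand(8, 256)
scores, deltas = head(feats)
# The 80 foreground columns get the confidence-weighted calibration; the
# background column (index 80) is concatenated back unchanged.
assert scores.shape == (8, 81)
assert deltas.shape == (8, 4)                # class-agnostic: one box per proposal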
RNN
import torch import torch.nn as nn from torch.autograd import Variable class RNN(nn.Module): def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters): super(RNN, self).__init__() self.hidden_size = hidden_size self.all_categories = all_categories self.n_categories = n_categories self.all_letters = all_letters self.n_letters = n_letters self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.dropout = nn.Dropout(0.1) self.softmax = nn.LogSoftmax(dim=1) def forward(self, category, input_tensor, hidden): input_combined = torch.cat((category, input_tensor, hidden), 1) hidden = self.i2h(input_combined) output = self.i2o(input_combined) output_combined = torch.cat((hidden, output), 1) output = self.o2o(output_combined) output = self.dropout(output) output = self.softmax(output) return output, hidden def init_hidden(self): return Variable(torch.zeros(1, self.hidden_size)) @staticmethod def gen_input_tensor(all_letters, n_letters, line): tensor = torch.zeros(len(line), 1, n_letters) for li in range(len(line)): letter = line[li] tensor[li][0][all_letters.find(letter)] = 1 return tensor @staticmethod def gen_category_tensor(all_categories, n_categories, category): li = all_categories.index(category) tensor = torch.zeros(1, n_categories) tensor[0][li] = 1 return tensor def sample(self, category, start_letter='A'): category_tensor = Variable(self.gen_category_tensor(self. all_categories, self.n_categories, category)) input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter)) hidden = self.init_hidden() output_name = start_letter max_length = 20 for i in range(max_length): output, hidden = self.forward(category_tensor, input_tensor[0], hidden) _topv, topi = output.data.topk(1) topi = topi[0][0] if topi == self.n_letters - 1: break else: letter = self.all_letters[topi] output_name += letter input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter)) return output_name def samples(self, category, start_letters='ABC'): for start_letter in start_letters: yield self.sample(category, start_letter) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4, 'all_categories': 4, 'n_categories': 4, 'all_letters': 4, 'n_letters': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 12), (12, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 12), (12, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 8), (8, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3,
            buf0, 48, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
            (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4)
        extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6,
            (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0)
        triton_poi_fused_cat_1[grid(16)](buf1, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5)
        del primals_9
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf7 = buf5
        del buf5
        triton_poi_fused__log_softmax_3[grid(16)](buf6, buf7, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf6
    return buf7, buf1, buf0, buf4, buf7, primals_8


class RNNNew(nn.Module):

    def __init__(self, input_size, hidden_size, output_size,
            all_categories, n_categories, all_letters, n_letters):
        super(RNNNew, self).__init__()
        self.hidden_size = hidden_size
        self.all_categories = all_categories
        self.n_categories = n_categories
        self.all_letters = all_letters
        self.n_letters = n_letters
        self.i2h = nn.Linear(n_categories + input_size + hidden_size,
            hidden_size)
        self.i2o = nn.Linear(n_categories + input_size + hidden_size,
            output_size)
        self.o2o = nn.Linear(hidden_size + output_size, output_size)
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.LogSoftmax(dim=1)

    def init_hidden(self):
        return Variable(torch.zeros(1, self.hidden_size))

    @staticmethod
    def gen_input_tensor(all_letters, n_letters, line):
        tensor = torch.zeros(len(line), 1, n_letters)
        for li in range(len(line)):
            letter = line[li]
            tensor[li][0][all_letters.find(letter)] = 1
        return tensor

    @staticmethod
    def gen_category_tensor(all_categories, n_categories, category):
        li = all_categories.index(category)
        tensor = torch.zeros(1, n_categories)
        tensor[0][li] = 1
        return tensor

    def sample(self, category, start_letter='A'):
        category_tensor = Variable(self.gen_category_tensor(
            self.all_categories, self.n_categories, category))
        input_tensor = Variable(self.gen_input_tensor(self.all_letters,
            self.n_letters, start_letter))
        hidden = self.init_hidden()
        output_name = start_letter
        max_length = 20
        for i in range(max_length):
            output, hidden = self.forward(category_tensor, input_tensor[0],
                hidden)
            _topv, topi = output.data.topk(1)
            topi = topi[0][0]
            if topi == self.n_letters - 1:
                break
            else:
                letter = self.all_letters[topi]
                output_name += letter
            input_tensor = Variable(self.gen_input_tensor(self.all_letters,
                self.n_letters, letter))
        return output_name

    def samples(self, category, start_letters='ABC'):
        for start_letter in start_letters:
            yield self.sample(category, start_letter)

    def forward(self, input_0, input_1, input_2):
        primals_4 = self.i2h.weight
        primals_5 = self.i2h.bias
        primals_6 = self.i2o.weight
        primals_7 = self.i2o.bias
        primals_8 = self.o2o.weight
        primals_9 = self.o2o.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0], output[1]
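For reference, the call() graph above is the fused form of a single character-RNN generation step. The sketch below is a minimal eager-mode reconstruction of that step, inferred from the kernel sequence (concatenate the three inputs, apply i2h and i2o, concatenate the new hidden state with the i2o output, apply o2o, then log-softmax); the function name is invented for illustration, and the module's dropout layer is left out because it does not appear in the fused call() graph above.

import torch
import torch.nn.functional as F


def rnn_step_eager(category, letter, hidden, i2h, i2o, o2o):
    # triton_poi_fused_cat_0: concatenate category, input letter and hidden state
    combined = torch.cat((category, letter, hidden), 1)
    new_hidden = i2h(combined)          # addmm with primals_4 / primals_5
    output = i2o(combined)              # addmm with primals_6 / primals_7
    # triton_poi_fused_cat_1 plus the aliased buf4: cat((hidden, output), 1)
    output = o2o(torch.cat((new_hidden, output), 1))  # addmm with primals_8 / primals_9
    # the two _log_softmax kernels together compute log-softmax over dim=1
    return F.log_softmax(output, dim=1), new_hidden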
tom-kuchler/vhive
RNN
false
16,600
[ "MIT" ]
138
ae1f2f5920e7607e9902ed1060bda62b56e332ac
https://github.com/tom-kuchler/vhive/tree/ae1f2f5920e7607e9902ed1060bda62b56e332ac
Upsample2d
from _paritybench_helpers import _mock_config
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Upsample2d(nn.Module):

    def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
        """
        Upsample2d method in G_synthesis_stylegan2.

        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
            (separable). The default is `[1] * factor`, which corresponds
            to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).

        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, \
            'factor must be larger than 1! (default: 2)'
        self.gain = gain
        self.factor = factor
        self.opts = opts
        self.k = _setup_kernel(k) * (self.gain * factor ** 2)
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = torch.flip(self.k, [2, 3])
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[0] - self.factor
        self.padx0, self.pady0 = ((self.p + 1) // 2 + factor - 1,
                                  (self.p + 1) // 2 + factor - 1)
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]
        self.down = down

    def forward(self, x):
        y = x.clone()
        y = y.reshape([-1, x.shape[2], x.shape[3], 1])
        inC, inH, inW = x.shape[1:]
        y = torch.reshape(y, (-1, inH, 1, inW, 1, 1))
        y = F.pad(y, (0, 0, self.factor - 1, 0, 0, 0, self.factor - 1, 0,
            0, 0, 0, 0))
        y = torch.reshape(y, (-1, 1, inH * self.factor, inW * self.factor))
        y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0),
            max(self.padx0, 0), max(self.padx1, 0), 0, 0))
        y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0),
            max(-self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :]
        y = y.permute(0, 3, 1, 2)
        y = y.reshape(-1, 1, inH * self.factor + self.pady0 + self.pady1,
            inW * self.factor + self.padx0 + self.padx1)
        y = F.conv2d(y, self.k)
        y = y.view(-1, 1, inH * self.factor + self.pady0 + self.pady1 -
            self.kernelH + 1, inW * self.factor + self.padx0 + self.padx1 -
            self.kernelW + 1)
        if inH * self.factor != y.shape[1]:
            y = F.interpolate(y, size=(inH * self.factor, inW * self.factor),
                mode='bilinear')
        y = y.permute(0, 2, 3, 1)
        y = y.reshape(-1, inC, inH * self.factor, inW * self.factor)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'opts': _mock_config()}]
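A quick numeric check of _setup_kernel above (plain arithmetic, not code from the repository): with the default k = [1, 3, 3, 1] the separable outer product sums to 64, normalization brings that to 1, and the constructor's gain * factor ** 2 rescaling brings the total filter weight to 4, one unit of weight for each of the factor ** 2 positions created by zero-insertion upsampling, so the average signal level is preserved.

import numpy as np

k = np.asarray([1, 3, 3, 1], dtype=np.float32)
k2d = np.outer(k, k)         # 4x4 separable kernel; entries sum to 8 * 8 = 64
k2d /= np.sum(k2d)           # normalized: entries now sum to 1.0
scaled = k2d * (1 * 2 ** 2)  # gain=1, factor=2, as in the default constructor
print(scaled.sum())          # 4.0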
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 2304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 72 % 2
    x1 = xindex // 8 % 9
    x0 = xindex % 8
    x3 = xindex // 144
    x7 = xindex
    tmp0 = -1 + x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = -1 + x1
    tmp4 = tmp3 >= tmp1
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + (-1 + x1) % 2
    tmp7 = tmp6 >= tmp1
    tmp8 = -1 + x0 % 2
    tmp9 = tmp8 >= tmp1
    tmp10 = tmp7 & tmp9
    tmp11 = tmp10 & tmp5
    tmp12 = tl.load(in_ptr0 + (4 * ((-1 + x1) // 2 % 4) + 16 * x3 + x0 // 2),
        tmp11 & xmask, eviction_policy='evict_last', other=0.0)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp5, tmp12, tmp13)
    tl.store(out_ptr0 + x7, tmp14, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 128
    xnumel = 8
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 8
    y1 = yindex // 8
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 144 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 8 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2(
    in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64
    x3 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 0.625
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tmp10 = tl.full([1], 1, tl.int64)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 4, tl.int64)
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tmp14 = x0
    tmp15 = tmp14.to(tl.float32)
    tmp16 = tmp15 + tmp2
    tmp17 = tmp16 * tmp4
    tmp18 = tmp17 - tmp2
    tmp19 = triton_helpers.maximum(tmp18, tmp7)
    tmp20 = tmp19.to(tl.int32)
    tmp21 = tmp20 + tmp10
    tmp22 = triton_helpers.minimum(tmp21, tmp12)
    tmp23 = tl.load(in_ptr0 + (tmp22 + 5 * tmp13 + 25 * x2), xmask,
        eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (tmp20 + 5 * tmp13 + 25 * x2), xmask,
        eviction_policy='evict_last')
    tmp25 = tmp23 - tmp24
    tmp26 = tmp20.to(tl.float32)
    tmp27 = tmp19 - tmp26
    tmp28 = triton_helpers.maximum(tmp27, tmp7)
    tmp29 = 1.0
    tmp30 = triton_helpers.minimum(tmp28, tmp29)
    tmp31 = tmp25 * tmp30
    tmp32 = tl.load(in_ptr0 + (tmp20 + 5 * tmp9 + 25 * x2), xmask,
        eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (tmp22 + 5 * tmp9 + 25 * x2), xmask,
        eviction_policy='evict_last')
    tmp34 = tmp33 - tmp32
    tmp35 = tmp34 * tmp30
    tmp36 = tmp32 + tmp35
    tmp37 = tmp24 + tmp31
    tmp38 = tmp37 - tmp36
    tmp39 = tmp9.to(tl.float32)
    tmp40 = tmp8 - tmp39
    tmp41 = triton_helpers.maximum(tmp40, tmp7)
    tmp42 = triton_helpers.minimum(tmp41, tmp29)
    tmp43 = tmp38 * tmp42
    tmp44 = tmp36 + tmp43
    tl.store(in_out_ptr0 + x3, tmp44, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (1, 1, 4, 4), (16, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 2, 9, 8), (144, 72, 8, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(2304)](arg0_1, buf0, 2304,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((16, 1, 8, 8), (64, 1, 8, 1), torch.float32)
        triton_poi_fused_convolution_1[grid(128, 8)](buf0, buf1, 128, 8,
            XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        buf2 = extern_kernels.convolution(buf1, arg1_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (16, 1, 5, 5), (25, 1, 5, 1))
        del arg1_1
        buf3 = reinterpret_tensor(buf1, (16, 1, 8, 8), (64, 1024, 8, 1), 0)
        del buf1
        buf5 = buf3
        del buf3
        buf6 = reinterpret_tensor(buf5, (16, 1, 8, 8), (64, 1, 8, 1), 0)
        del buf5
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2[
            grid(1024)](buf6, buf2, 1024, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf2
    return reinterpret_tensor(buf6, (4, 4, 8, 8), (256, 64, 8, 1), 0),


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Upsample2dNew(nn.Module):

    def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
        """
        Upsample2d method in G_synthesis_stylegan2.

        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
            (separable). The default is `[1] * factor`, which corresponds
            to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).

        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, \
            'factor must be larger than 1! (default: 2)'
        self.gain = gain
        self.factor = factor
        self.opts = opts
        self.k = _setup_kernel(k) * (self.gain * factor ** 2)
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = torch.flip(self.k, [2, 3])
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[0] - self.factor
        self.padx0, self.pady0 = ((self.p + 1) // 2 + factor - 1,
                                  (self.p + 1) // 2 + factor - 1)
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]
        self.down = down

    def forward(self, input_0):
        arg1_1 = self.k
        arg0_1 = input_0
        output = call([arg0_1, arg1_1])
        return output[0]
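As a hedged usage sketch (not part of the original repository): the compiled module is constructed the same way as Upsample2d, and the generated call() only accepts CUDA tensors of the harness shape [4, 4, 4, 4]. The opts argument is stored but never read in forward, so a None placeholder is assumed to be acceptable here.

import torch

model = Upsample2dNew(opts=None).cuda()      # k=[1, 3, 3, 1], factor=2 by default
x = torch.rand([4, 4, 4, 4], device='cuda')  # matches get_inputs() above
y = model(x)
print(y.shape)                               # torch.Size([4, 4, 8, 8]): H and W doubled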
tomguluson92/StyleGAN2_PyTorch
Upsample2d
false
16,601
[ "MIT" ]
89
4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
https://github.com/tomguluson92/StyleGAN2_PyTorch/tree/4ab7354c85cb986d2b77f5238c4a18c5efd1db1b