Dataset columns and the value ranges reported by the dataset viewer:

| Column | Type | Viewer stats |
|---|---|---|
| entry_point | string | length 1 to 65 |
| original_triton_python_code | string | length 208 to 619k |
| optimised_triton_code | string | length 1.15k to 275k |
| repo_name | string | length 7 to 115 |
| module_name | string | length 1 to 65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0 to 18.5k |
| licenses | list | length 1 to 6 |
| stars | int64 | 0 to 19.8k |
| sha | string | length 40 (fixed) |
| repo_link | string | length 72 to 180 |
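Each row below pairs a module's original PyTorch source (`original_triton_python_code`) with the Triton code that Inductor, the `torch.compile` backend, generated for it (`optimised_triton_code`), plus repository metadata. A minimal sketch of reading the rows with the `datasets` library; the dataset id used here is a placeholder, not the real repository id:

```python
from datasets import load_dataset

# "user/inductor-triton-pairs" is a placeholder id; substitute the actual
# dataset repository before running.
ds = load_dataset("user/inductor-triton-pairs", split="train")

row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["original_triton_python_code"][:120])  # eager PyTorch source
print(row["optimised_triton_code"][:120])        # Inductor-generated Triton code
```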
AMSoftmaxLoss
import torch import torch.nn as nn import torch.nn.functional as F class AMSoftmaxLoss(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLoss, self).__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, x_BxH, labels_B): """ x shape: (B, H) labels shape: (B) """ assert len(x_BxH) == len(labels_B) assert torch.min(labels_B) >= 0 assert torch.max(labels_B) < self.speaker_num W = F.normalize(self.W, dim=0) x_BxH = F.normalize(x_BxH, dim=1) wf = torch.mm(x_BxH, W) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels_B]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels_B)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'hidden_dim': 4, 'speaker_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_3, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_sub_2[grid(4)](primals_2, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4 ), (1, 4), 0) class AMSoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, input_0, input_1): primals_1 = self.W primals_3 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
AMSoftmaxLoss
false
1,991
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
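Every `original_triton_python_code` cell follows the same harness convention: `get_init_inputs()` returns the constructor's positional and keyword arguments, and `get_inputs()` returns sample forward-pass inputs. A minimal sketch using the AMSoftmaxLoss row above, assuming its source cell has been saved as `am_softmax.py` (a hypothetical file name); the eager module runs on CPU, no Triton required:

```python
import torch
from am_softmax import AMSoftmaxLoss, get_init_inputs, get_inputs

args, kwargs = get_init_inputs()        # [[], {'hidden_dim': 4, 'speaker_num': 4}]
module = AMSoftmaxLoss(*args, **kwargs)

features, labels = get_inputs()         # (B, H) float features, (B,) int64 labels
loss = module(features, labels)
print(loss)                             # scalar AM-Softmax loss
```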
AdMSoftmaxLoss
import torch import torch.nn as nn import torch.nn.functional as F class AdMSoftmaxLoss(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLoss, self).__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, x, labels): """ input shape (N, in_features) """ assert len(x) == len(labels) assert torch.min(labels) >= 0 assert torch.max(labels) < self.out_features for W in self.fc.parameters(): W = F.normalize(W, dim=1) x = F.normalize(x, dim=1) wf = self.fc(x) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x0 + 16 * tmp4 + 64 * x1), xmask) tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32) triton_poi_fused_mul_sub_1[grid(64)](primals_2, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class AdMSoftmaxLossNew(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, input_0, input_1): primals_3 = self.fc.weight primals_1 = input_0 primals_2 = input_1 output = 
call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
AdMSoftmaxLoss
false
1,992
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
Model
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(Model, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, features): pooled = features.mean(dim=1) predicted = self.linear(pooled) return predicted def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_class_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0) class ModelNew(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(ModelNew, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
Model
false
1,993
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
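The optimised cells have the shape of what Inductor emits when a module is compiled. A sketch of regenerating comparable output for the Model row above (assumes a CUDA device and PyTorch 2.x):

```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, input_dim, output_class_num, **kwargs):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        pooled = features.mean(dim=1)   # fused into a Triton mean kernel after compilation
        return self.linear(pooled)

compiled = torch.compile(Model(4, 4).cuda())
out = compiled(torch.rand(4, 4, 4, 4, device="cuda"))
# Launching the script with TORCH_LOGS="output_code" prints the generated
# Triton kernels, comparable to the optimised_triton_code cells here.
```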
OutConv
import torch import torch.utils.data import torch import torch.nn as nn class OutConv(nn.Module): def __init__(self, in_channels, out_channels): super(OutConv, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class OutConvNew(nn.Module): def __init__(self, in_channels, out_channels): super(OutConvNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AzmHmd/RMS
OutConv
false
1,994
[ "MIT" ]
0
61d108e118d1e06de324644ebd8d92fc1b091b91
https://github.com/AzmHmd/RMS/tree/61d108e118d1e06de324644ebd8d92fc1b091b91
SelfAttentionPooling
import torch import torch.nn as nn class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPooling, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (N, T, 1) return: utter_rep: size (N, H) """ batch_rep.shape[1] att_logits = self.W(batch_rep).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, 
eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_0[grid(64)](primals_4, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(256)](primals_1, primals_4, buf1, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 return buf4, primals_1, primals_4, buf1 class SelfAttentionPoolingNew(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPoolingNew, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, input_0, input_1): primals_2 = self.W.weight primals_3 = self.W.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
AyushExel/s3prl
SelfAttentionPooling
false
1,995
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
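Written out, the pooling computed by the SelfAttentionPooling row above is (with h_t the t-th frame of `batch_rep`, m_t the attention-mask logit, and w, b the parameters of the linear layer `self.W`):

```latex
\alpha_t = \frac{\exp\!\left(m_t + w^\top h_t + b\right)}
                {\sum_{\tau=1}^{T}\exp\!\left(m_\tau + w^\top h_\tau + b\right)},
\qquad
u = \sum_{t=1}^{T} \alpha_t\, h_t
```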
ResidualBlock
import torch import torch.onnx class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 4, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) 
tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tl.where(xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 16, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 16.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp25, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf6 triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2, buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) 
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5, buf8, buf3, buf4, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((16,), (1,), torch.float32) buf11 = buf10 del buf10 buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid (16)](buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 del primals_7 del primals_8 del primals_9 return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlockNew(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlockNew, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.in1.weight primals_5 = self.in1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.in2.weight primals_9 = self.in2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Ali-ry/azureml-examples
ResidualBlock
false
1,996
[ "MIT" ]
0
817ae89d2766dcafd70937a22cb3a80f100a2906
https://github.com/Ali-ry/azureml-examples/tree/817ae89d2766dcafd70937a22cb3a80f100a2906
AvgReadout
import torch import torch.nn as nn class AvgReadout(nn.Module): """ Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices .. math:: \\begin{equation} \\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)} \\end{equation} """ def __init__(self): super(AvgReadout, self).__init__() def forward(self, seq): return torch.mean(seq, 0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class AvgReadoutNew(nn.Module): """ Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices .. math:: \\begin{equation} \\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)} \\end{equation} """ def __init__(self): super(AvgReadoutNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BUPTlfq/OpenHGNN
AvgReadout
false
1,997
[ "Apache-2.0" ]
0
77041e68c33a8a42a2c187c6e42d85b81cbb25d3
https://github.com/BUPTlfq/OpenHGNN/tree/77041e68c33a8a42a2c187c6e42d85b81cbb25d3
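Each row's two classes are interchangeable at the output, which allows a direct numerical check. A minimal sketch for the AvgReadout row, assuming both of its code cells were saved together as `avg_readout_triton.py` (a hypothetical file name) and a CUDA device is available:

```python
import torch
from avg_readout_triton import AvgReadout, AvgReadoutNew, get_inputs

x = get_inputs()[0].cuda()

ref = AvgReadout()(x)      # eager torch.mean(seq, 0)
opt = AvgReadoutNew()(x)   # triton_poi_fused_mean_0 via call()
assert torch.allclose(ref, opt, atol=1e-6)
```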
BCEDiceLoss
import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F class BCEDiceLoss(nn.Module): def __init__(self): super().__init__() def forward(self, output, target): bce = F.binary_cross_entropy_with_logits(output, target) smooth = 1e-05 output = torch.sigmoid(output) num = target.size(0) output = output.view(num, -1) target = target.view(num, -1) intersection = output * target dice = (2.0 * intersection.sum(1) + smooth) / (output.sum(1) + target.sum(1) + smooth) dice = 1 - dice.sum() / num return 0.5 * bce + dice def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp13 = tl.load(in_out_ptr0 + 0) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1e-05 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = 0.25 tmp20 = tmp12 * tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tmp23 = tmp18 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = buf0 del buf0 triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2[ grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 del buf3 return buf5, class BCEDiceLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AzmHmd/RMS
BCEDiceLoss
false
1,998
[ "MIT" ]
0
61d108e118d1e06de324644ebd8d92fc1b091b91
https://github.com/AzmHmd/RMS/tree/61d108e118d1e06de324644ebd8d92fc1b091b91
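The scalar computed by the BCEDiceLoss row above, with o the logits, t the targets, p = sigmoid(o), epsilon = 1e-5, and n indexing the batch of size N:

```latex
\mathrm{Dice}_n = \frac{2\sum_i p_{n,i}\, t_{n,i} + \varepsilon}
                       {\sum_i p_{n,i} + \sum_i t_{n,i} + \varepsilon},
\qquad
L = \tfrac{1}{2}\,\mathrm{BCE}(o, t) + 1 - \frac{1}{N}\sum_{n=1}^{N}\mathrm{Dice}_n
```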
AP
import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class AP(nn.Module): """ Attentive Pooling module incoporate attention mask""" def __init__(self, out_dim, input_dim): super(AP, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.sap_layer = AttentivePooling(out_dim) self.act_fn = nn.ReLU() def forward(self, feature_BxTxH, att_mask_BxT): """ Arguments feature_BxTxH - [BxTxH] Acoustic feature with shape att_mask_BxT - [BxT] Attention Mask logits """ feature_BxTxH = self.linear(feature_BxTxH) sap_vec, _ = self.sap_layer(feature_BxTxH, att_mask_BxT) return sap_vec def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_dim': 4, 'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, 
eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 return buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, 
primals_6, buf8, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class APNew(nn.Module): """ Attentive Pooling module incoporate attention mask""" def __init__(self, out_dim, input_dim): super(APNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.sap_layer = AttentivePooling(out_dim) self.act_fn = nn.ReLU() def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.sap_layer.W_a.weight primals_5 = self.sap_layer.W_a.bias primals_6 = self.sap_layer.W.weight primals_7 = self.sap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
AyushExel/s3prl
AP
false
1,999
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
SAP
import torch import torch.nn as nn class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPooling, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (N, T, 1) return: utter_rep: size (N, H) """ batch_rep.shape[1] att_logits = self.W(batch_rep).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep class SAP(nn.Module): """ Self Attention Pooling module incoporate attention mask""" def __init__(self, out_dim): super(SAP, self).__init__() self.act_fn = nn.Tanh() self.sap_layer = SelfAttentionPooling(out_dim) def forward(self, feature, att_mask): """ Arguments feature - [BxTxD] Acoustic feature with shape att_mask - [BxTx1] Attention Mask logits """ feature = self.act_fn(feature) sap_vec = self.sap_layer(feature, att_mask) return sap_vec def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, 
eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_4, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_4, buf2, buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 return buf5, primals_4, buf0, buf2 class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPooling, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (N, T, 1) return: utter_rep: size (N, H) """ batch_rep.shape[1] att_logits = self.W(batch_rep).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep class SAPNew(nn.Module): """ Self Attention Pooling module incoporate attention mask""" def __init__(self, out_dim): super(SAPNew, self).__init__() self.act_fn = nn.Tanh() self.sap_layer = SelfAttentionPooling(out_dim) def forward(self, input_0, input_1): primals_2 
= self.sap_layer.W.weight primals_3 = self.sap_layer.W.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
AyushExel/s3prl
SAP
false
2000
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
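The SAPNew wrapper above routes SAP's parameters through the compiled `call` graph, so the two modules should agree numerically. The parity sketch below is an illustration added here, not part of the dataset row; it assumes the SAP/SAPNew classes and get_inputs from this record are in scope and that a CUDA device is available, since the Triton kernels are CUDA-only.

import torch

# Parity sketch (illustration only, not part of the dataset row).
torch.manual_seed(0)
ref = SAP(out_dim=4).cuda()
opt = SAPNew(out_dim=4).cuda()
opt.load_state_dict(ref.state_dict())  # share the attention weights

feature, att_mask = (t.cuda() for t in get_inputs())
with torch.no_grad():
    expected = ref(feature, att_mask)
    actual = opt(feature, att_mask)
print(torch.allclose(expected, actual, atol=1e-5))  # expected: True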
Mish
import torch
import torch.nn as nn
import torch.nn.functional as F


class Mish(nn.Module):

    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MishNew(nn.Module): def __init__(self): super(MishNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BDeMo/yolov4-pytorch
Mish
false
2001
[ "MIT" ]
0
2434afc88d0890bdb19c5655bb7c577d22bf18d3
https://github.com/BDeMo/yolov4-pytorch/tree/2434afc88d0890bdb19c5655bb7c577d22bf18d3
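The fused kernel above folds softplus, tanh, and the final multiply into one pass, and uses the same overflow guard as F.softplus (fall back to the identity once x exceeds the threshold of 20, instead of computing log1p(exp(x))). A quick sanity check added for illustration, assuming PyTorch >= 1.9 where the built-in F.mish exists:

import torch
import torch.nn.functional as F

# The hand-rolled Mish above should match torch's built-in F.mish.
x = torch.randn(4, 4, 4, 4)
manual = x * torch.tanh(F.softplus(x))  # what Mish.forward computes
builtin = F.mish(x)
print(torch.allclose(manual, builtin, atol=1e-6))  # expected: True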
ResidualAttentionBlock
import torch
from collections import OrderedDict
from torch import nn


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: 'torch.Tensor'):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):

    def forward(self, x: 'torch.Tensor'):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):

    def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
            'torch.Tensor'=None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ('c_fc', nn.Linear(d_model, d_model * 4)),
            ('gelu', QuickGELU()),
            ('c_proj', nn.Linear(d_model * 4, d_model))]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: 'torch.Tensor'):
        # Cast the mask to the input's dtype/device before use (a no-op when
        # attn_mask is None); the source had a pure self-assignment here.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device
            ) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: 'torch.Tensor'):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'n_head': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from collections import OrderedDict from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf5) buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0) del buf4 triton_poi_fused_mul_3[grid(16)](buf7, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0), 0 ), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8) buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_4[grid(64)](buf8, buf9, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_5[grid(64)](buf8, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32) triton_poi_fused_clone_6[grid(4, 4)](buf11, buf12, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_7 buf14 = buf1 del buf1 buf15 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_7[grid(4)](primals_1, buf13, buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_1, buf13, buf14, buf15, primals_8, primals_9, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf14 del buf15 del primals_9 buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0) del buf9 extern_kernels.addmm(primals_11, buf16, reinterpret_tensor( primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_11 buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0) del buf8 triton_poi_fused_mul_sigmoid_9[grid(64)](buf17, buf18, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf19) buf20 = buf19 del buf19 triton_poi_fused_add_10[grid(16)](buf20, primals_1, buf13, primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_13 return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor( buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4 ), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: 'torch.Tensor'): orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class QuickGELU(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) class ResidualAttentionBlockNew(nn.Module): def __init__(self, d_model: 'int', n_head: 'int', attn_mask: 'torch.Tensor'=None): super().__init__() self.attn = nn.MultiheadAttention(d_model, n_head) self.ln_1 = LayerNorm(d_model) self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear( d_model * 4, d_model))])) self.ln_2 = LayerNorm(d_model) self.attn_mask = attn_mask def attention(self, x: 'torch.Tensor'): self.attn_mask = self.attn_mask if self.attn_mask is not None else None return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask )[0] def forward(self, input_0): primals_4 = self.attn.in_proj_weight primals_5 = self.attn.in_proj_bias primals_1 = self.attn.out_proj.weight primals_2 = self.attn.out_proj.bias primals_3 = self.ln_1.weight primals_7 = self.ln_1.bias primals_10 = self.mlp.c_fc.weight primals_11 = self.mlp.c_fc.bias primals_12 = self.mlp.c_proj.weight primals_8 = self.mlp.c_proj.bias primals_9 = self.ln_2.weight primals_13 = self.ln_2.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
Artanic30/RentalPrediction
ResidualAttentionBlock
false
2002
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
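QuickGELU replaces the exact GELU with the cheaper x * sigmoid(1.702 * x); 1.702 is the usual fitted constant for the sigmoid approximation. A small illustration of how close the two are (an addition for context, not part of the record):

import torch
import torch.nn as nn

# Compare the sigmoid approximation against exact GELU.
x = torch.linspace(-4, 4, steps=9)
quick = x * torch.sigmoid(1.702 * x)  # QuickGELU
exact = nn.GELU()(x)
print((quick - exact).abs().max())    # on the order of 1e-2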
MultiheadAttention
import torch
import torch.nn as nn
from collections import defaultdict
from torch.nn import Parameter
import torch.nn.functional as F

# Per-class instance counters; this registry was referenced but left undefined
# in the source (it belongs to fairseq's utils module).
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)


def fill_with_neg_inf(t):
    """FP16-compatible function that fills a tensor with -inf."""
    return t.float().fill_(float('-inf')).type_as(t)


def _get_full_incremental_state_key(module_instance, key):
    module_name = module_instance.__class__.__name__
    if not hasattr(module_instance, '_fairseq_instance_id'):
        INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
        module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[
            module_name]
    return '{}.{}.{}'.format(module_name, module_instance.
        _fairseq_instance_id, key)


def get_incremental_state(module, incremental_state, key):
    """Helper for getting incremental state for an nn.Module."""
    full_key = _get_full_incremental_state_key(module, key)
    if incremental_state is None or full_key not in incremental_state:
        return None
    return incremental_state[full_key]


def set_incremental_state(module, incremental_state, key, value):
    """Helper for setting incremental state for an nn.Module."""
    if incremental_state is not None:
        full_key = _get_full_incremental_state_key(module, key)
        incremental_state[full_key] = value


class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
        self.scaling = self.head_dim ** -0.5
        self._mask = None
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.0)
            nn.init.constant_(self.out_proj.bias, 0.0)

    def forward(self, query, key, value, mask_future_timesteps=False,
            key_padding_mask=None, incremental_state=None, need_weights=True,
            static_kv=False):
        """Input shape: Time x Batch x Channel

        Self-attention can be implemented by passing in the same arguments
        for query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded
        from the key by passing a binary ByteTensor (`key_padding_mask`)
        with shape: batch x src_len, where padding elements are indicated
        by 1s.
        """
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                if static_kv:
                    assert kv_same and not qkv_same
                    key = value = None
        else:
            saved_state = None
        if qkv_same:
            q, k, v = self.in_proj_qkv(query)
        elif kv_same:
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = q.new(0)
            else:
                k, v = self.in_proj_kv(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling
        if saved_state is not None:
            if 'prev_key' in saved_state:
                k = torch.cat((saved_state['prev_key'], k), dim=0)
            if 'prev_value' in saved_state:
                v = torch.cat((saved_state['prev_value'], v), dim=0)
            saved_state['prev_key'] = k
            saved_state['prev_value'] = v
            self._set_input_buffer(incremental_state, saved_state)
        src_len = k.size(0)
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
            ).transpose(0, 1)
        k = k.contiguous().view(src_len, bsz * self.num_heads, self.head_dim
            ).transpose(0, 1)
        v = v.contiguous().view(src_len, bsz * self.num_heads, self.head_dim
            ).transpose(0, 1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
            src_len]
        if mask_future_timesteps and incremental_state is None:
            assert query.size() == key.size(
                ), 'mask_future_timesteps only applies to self-attention'
            attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)
        if key_padding_mask is not None:
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
                src_len)
            attn_weights = attn_weights.float().masked_fill(key_padding_mask
                .unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
                src_len)
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
            attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=
            self.training)
        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
            head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights.sum(dim=1) / self.num_heads
        return attn, attn_weights

    def in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_kv(self, key):
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)

    def in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)

    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)

    def in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)

    def _in_proj(self, input, start=None, end=None):
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        if end is not None:
            weight = weight[:end, :]
            if bias is not None:
                bias = bias[:end]
        if start is not None:
            weight = weight[start:, :]
            if bias is not None:
                bias = bias[start:]
        return F.linear(input, weight, bias)

    def buffered_mask(self, tensor):
        dim = tensor.size(-1)
        if self._mask is None:
            self._mask = torch.triu(fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._mask.size(0) < dim:
            self._mask = torch.triu(fill_with_neg_inf(self._mask.resize_(
                dim, dim)), 1)
        return self._mask[:dim, :dim]

    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer[k] = input_buffer[k].index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return get_incremental_state(self, incremental_state, 'attn_state'
            ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        set_incremental_state(self, incremental_state, 'attn_state', buffer)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_7, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_7 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_div_sum_4[grid(64)](buf6, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) return reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0 ), buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_6, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0) def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float('-inf')).type_as(t) def _get_full_incremental_state_key(module_instance, key): module_name = module_instance.__class__.__name__ if not hasattr(module_instance, '_fairseq_instance_id'): INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[ module_name] return '{}.{}.{}'.format(module_name, module_instance. _fairseq_instance_id, key) def get_incremental_state(module, incremental_state, key): """Helper for getting incremental state for an nn.Module.""" full_key = _get_full_incremental_state_key(module, key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state(module, incremental_state, key, value): """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = _get_full_incremental_state_key(module, key) incremental_state[full_key] = value class MultiheadAttentionNew(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self._mask = None self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query): return self._in_proj(query, end=self.embed_dim) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=None, end=None): weight = self.in_proj_weight bias = self.in_proj_bias if end is not None: weight = weight[:end, :] if bias is not None: bias = bias[:end] if start is not None: weight = weight[start:, :] if bias is not None: bias = bias[start:] return F.linear(input, weight, bias) def buffered_mask(self, tensor): dim = tensor.size(-1) if self._mask is None: self._mask = torch.triu(fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._mask.size(0) < dim: self._mask = 
torch.triu(fill_with_neg_inf(self._mask.resize_( dim, dim)), 1) return self._mask[:dim, :dim] def reorder_incremental_state(self, incremental_state, new_order): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer[k] = input_buffer[k].index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return get_incremental_state(self, incremental_state, 'attn_state' ) or {} def _set_input_buffer(self, incremental_state, buffer): set_incremental_state(self, incremental_state, 'attn_state', buffer) def forward(self, input_0, input_1, input_2): primals_4 = self.in_proj_weight primals_5 = self.in_proj_bias primals_6 = self.out_proj.weight primals_7 = self.out_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
ArkanDH/Team5-Inverse-Cooking-Stuff
MultiheadAttention
false
2003
[ "MIT" ]
0
ec224918b25fb7a04aa09995e4d11804448df7dd
https://github.com/ArkanDH/Team5-Inverse-Cooking-Stuff/tree/ec224918b25fb7a04aa09995e4d11804448df7dd
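For reference, the bmm/softmax/bmm sequence in `call` above is plain scaled dot-product attention over the flattened (batch * heads) dimension. The sketch below restates it in eager PyTorch under the shapes implied by get_init_inputs (bsz * num_heads = 16, head_dim = 1, so scaling = 1.0); the helper name sdpa_reference is mine, not from the source.

import torch

# Eager-mode reference for the fused attention math (sketch only).
def sdpa_reference(q, k, v, scaling):
    attn_weights = torch.bmm(q * scaling, k.transpose(1, 2))  # (B*H, T, S)
    attn_weights = attn_weights.softmax(dim=-1)
    return torch.bmm(attn_weights, v)                         # (B*H, T, Dh)

q = k = v = torch.randn(16, 4, 1)  # bsz*num_heads=16, len=4, head_dim=1
print(sdpa_reference(q, k, v, scaling=1.0).shape)  # torch.Size([16, 4, 1])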
Fire
import torch
import torch.onnx
import torch.nn as nn


class Fire(nn.Module):

    def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
            expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
            kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
            kernel_size=3, padding=1)
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.squeeze_activation(self.squeeze(x))
        return torch.cat([
            self.expand1x1_activation(self.expand1x1(x)),
            self.expand3x3_activation(self.expand3x3(x))], 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4,
        'expand3x3_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.onnx import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp12 & xmask, other=0.0) tmp16 = tl.load(in_ptr3 + (-4 + x1), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf3, primals_7, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf2, primals_5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_5 return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6 class FireNew(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(FireNew, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.squeeze.weight primals_2 = self.squeeze.bias primals_4 = self.expand1x1.weight primals_5 = self.expand1x1.bias primals_6 = self.expand3x3.weight primals_7 = self.expand3x3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AndySer37/pytorch-ssd-mobile
Fire
false
2004
[ "MIT" ]
0
ec4935940ffa374edc1e9a7009c279e727e548d7
https://github.com/AndySer37/pytorch-ssd-mobile/tree/ec4935940ffa374edc1e9a7009c279e727e548d7
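A Fire block squeezes channels with a 1x1 convolution, then concatenates parallel 1x1 and 3x3 expand branches, so the output has expand1x1_planes + expand3x3_planes channels. A quick shape check added for illustration; it assumes the eager Fire class above is in scope and runs on CPU (FireNew requires CUDA):

import torch

# Output channels = expand1x1_planes + expand3x3_planes = 8.
fire = Fire(inplanes=4, squeeze_planes=4, expand1x1_planes=4,
    expand3x3_planes=4)
y = fire(torch.rand(4, 4, 4, 4))
print(y.shape)  # torch.Size([4, 8, 4, 4])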
CNormalized_Linear
import math
import torch
import torch as th


class CNormalized_Linear(th.nn.Module):
    """Linear layer with column-wise normalized input matrix."""

    def __init__(self, in_features, out_features, bias=False):
        """Initialize the layer."""
        super(CNormalized_Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = th.nn.Parameter(th.Tensor(out_features, in_features))
        if bias:
            self.bias = th.nn.Parameter(th.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Reset the parameters."""
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        """Feed-forward through the network."""
        return th.nn.functional.linear(input, self.weight.div(
            self.weight.pow(2).sum(0).sqrt()))

    def __repr__(self):
        """For print purposes."""
        return self.__class__.__name__ + '(' + 'in_features=' + str(self.
            in_features) + ', out_features=' + str(self.out_features
            ) + ', bias=' + str(self.bias is not None) + ')'


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_pow_sqrt_sum_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class CNormalized_LinearNew(th.nn.Module): """Linear layer with column-wise normalized input matrix.""" def __init__(self, in_features, out_features, bias=False): """Initialize the layer.""" super(CNormalized_LinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = th.nn.Parameter(th.Tensor(out_features, in_features)) if bias: self.bias = th.nn.Parameter(th.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): """Reset the parameters.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def __repr__(self): """For print purposes.""" return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None) + ')' def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
BadrYoubiIdrissi/CausalDiscoveryToolbox
CNormalized_Linear
false
2,005
[ "MIT" ]
0
1e729d002a64ea1942caecd21b9dc8cc217ea0e2
https://github.com/BadrYoubiIdrissi/CausalDiscoveryToolbox/tree/1e729d002a64ea1942caecd21b9dc8cc217ea0e2
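The fused kernel above normalizes each weight column by its L2 norm before the matmul. Up to the eps clamp, that is exactly F.normalize(weight, dim=0); a small check added for illustration, assuming no column norm is near zero:

import torch
import torch.nn.functional as F

# The module's scaling equals F.normalize(weight, dim=0) up to
# F.normalize's eps clamp (1e-12 on the column norm).
w = torch.randn(4, 4)
manual = w.div(w.pow(2).sum(0).sqrt())
print(torch.allclose(manual, F.normalize(w, dim=0), atol=1e-6))  # True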
ASP
import torch
import torch.nn as nn


class AttentivePooling(nn.Module):
    """Implementation of Attentive Pooling."""

    def __init__(self, input_dim, **kwargs):
        super(AttentivePooling, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
            att_mask : attention mask logits
        return:
            utter_rep: size (B, H)
            att_w : size (B, T, 1)
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


class ASP(nn.Module):
    """Attentive Statistic Pooling module incorporating an attention mask."""

    def __init__(self, out_dim, input_dim):
        super(ASP, self).__init__()
        self.linear = nn.Linear(input_dim, out_dim)
        self.ap_layer = AttentivePooling(out_dim)

    def forward(self, feature_BxTxH, att_mask_BxT):
        """
        Arguments
            feature_BxTxH - [BxTxH] Acoustic feature
            att_mask_BxT - [BxT] Attention mask logits
        """
        feature_BxTxH = self.linear(feature_BxTxH)
        sap_vec, att_w = self.ap_layer(feature_BxTxH, att_mask_BxT)
        variance = torch.sqrt(torch.sum(att_w * feature_BxTxH *
            feature_BxTxH, dim=1) - sap_vec ** 2 + 1e-08)
        statistic_pooling = torch.cat([sap_vec, variance], dim=-1)
        return statistic_pooling


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'out_dim': 4, 'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex // 4 % 64 x7 = xindex // 16 x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x7, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x7, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x8, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x9, tmp9, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = xindex // 64 x4 = xindex // 4 % 16 x2 = xindex // 16 % 4 x0 = xindex % 4 x5 = xindex // 4 x8 = xindex tmp0 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr5 + (x6 + 256 * x3), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + 256 * x3), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + 256 * x3), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + 256 * x3), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + 8 * x5), tmp42, xmask) tl.store(out_ptr2 + (x0 + 8 * x5), tmp58, xmask) tl.store(out_ptr3 + x8, tmp60, xmask) tl.store(out_ptr4 + x8, tmp61, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(1024)](primals_8, buf4, buf5, buf6, buf0, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32 ) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 return buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class ASPNew(nn.Module): """ Attentive Statistic Pooling module incoporate attention mask""" def __init__(self, out_dim, input_dim): super(ASPNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.ap_layer.W_a.weight primals_5 = self.ap_layer.W_a.bias primals_6 = self.ap_layer.W.weight primals_7 = self.ap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
AyushExel/s3prl
ASP
false
2006
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
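A minimal smoke test for the compiled ASPNew wrapper above, assuming a CUDA device (the generated kernels are CUDA-only) and that the class is in scope; the input shapes mirror the assert_size_stride guards inside call():

import torch

model = ASPNew(out_dim=4, input_dim=4).cuda()
feats = torch.rand(4, 4, 4, 4, device='cuda')  # primals_3 in call()
mask = torch.rand(4, 4, 4, 4, device='cuda')   # primals_8 in call()
out = model(feats, mask)
print(out.shape)  # torch.Size([4, 4, 4, 8]): buf7 (pooled mean) and buf10 (std) share buf11's last axis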
LinearDiag
import torch import torch.nn as nn import torch.optim import torch.nn.parallel class LinearDiag(nn.Module): def __init__(self, num_features, bias=False): super(LinearDiag, self).__init__() weight = torch.FloatTensor(num_features).fill_(1) self.weight = nn.Parameter(weight, requires_grad=True) if bias: bias = torch.FloatTensor(num_features).fill_(0) self.bias = nn.Parameter(bias, requires_grad=True) else: self.register_parameter('bias', None) def forward(self, X): assert X.dim() == 2 and X.size(1) == self.weight.size(0) out = X * self.weight.expand_as(X) if self.bias is not None: out = out + self.bias.expand_as(out) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf0, primals_1 class LinearDiagNew(nn.Module): def __init__(self, num_features, bias=False): super(LinearDiagNew, self).__init__() weight = torch.FloatTensor(num_features).fill_(1) self.weight = nn.Parameter(weight, requires_grad=True) if bias: bias = torch.FloatTensor(num_features).fill_(0) self.bias = nn.Parameter(bias, requires_grad=True) else: self.register_parameter('bias', None) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Basasuya/FewShotWithoutForgetting
LinearDiag
false
2007
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
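Because LinearDiagNew keeps the same parameter and only swaps the forward for the fused multiply kernel, a parity check against the eager module is straightforward; a sketch assuming a CUDA device and the definitions above in scope:

import torch

init_args, init_kwargs = get_init_inputs()
ref = LinearDiag(*init_args, **init_kwargs).cuda()
opt = LinearDiagNew(*init_args, **init_kwargs).cuda()
opt.load_state_dict(ref.state_dict())  # weight defaults to all-ones, but copy to be safe

x = get_inputs()[0].cuda()
torch.testing.assert_close(ref(x), opt(x))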
Discriminator
import torch import torch.nn as nn class Discriminator(nn.Module): """ The discriminator .. math:: \\begin{equation} \\mathcal{D}\\left(\\mathbf{h}_{i}^{(r)}, \\mathbf{s}^{(r)}\\right)=\\sigma\\left(\\mathbf{h}_{i}^{(r) T} \\mathbf{M}^{(r)} \\mathbf{s}^{(r)}\\right) \\end{equation} where :math:`M^{(r)}` is a trainable scoring matrix. """ def __init__(self, n_h): super(Discriminator, self).__init__() self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None): c_x = c.expand_as(h_pl) sc_1 = torch.squeeze(self.f_k_bilinear(h_pl, c_x), 1) sc_2 = torch.squeeze(self.f_k_bilinear(h_mi, c_x), 1) if s_bias1 is not None: sc_1 += s_bias1 if s_bias2 is not None: sc_2 += s_bias2 logits = torch.cat((sc_1, sc_2), 0) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_h': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1), tmp4 & xmask, other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1)), tmp11 & xmask, other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor( primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf1 = buf0 del buf0 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_5, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor( primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((8, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf1, primals_4, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 del primals_4 return buf4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0) class DiscriminatorNew(nn.Module): """ The discriminator .. math:: \\begin{equation} \\mathcal{D}\\left(\\mathbf{h}_{i}^{(r)}, \\mathbf{s}^{(r)}\\right)=\\sigma\\left(\\mathbf{h}_{i}^{(r) T} \\mathbf{M}^{(r)} \\mathbf{s}^{(r)}\\right) \\end{equation} where :math:`M^{(r)}` is a trainable scoring matrix. """ def __init__(self, n_h): super(DiscriminatorNew, self).__init__() self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1, input_2): primals_3 = self.f_k_bilinear.weight primals_4 = self.f_k_bilinear.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BUPTlfq/OpenHGNN
Discriminator
false
2008
[ "Apache-2.0" ]
0
77041e68c33a8a42a2c187c6e42d85b81cbb25d3
https://github.com/BUPTlfq/OpenHGNN/tree/77041e68c33a8a42a2c187c6e42d85b81cbb25d3
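The fused cat kernel folds the shared bilinear bias into both score halves before concatenation, so DiscriminatorNew should reproduce the eager logits once parameters are shared; a sketch assuming CUDA and the classes above:

import torch

ref = Discriminator(n_h=4).cuda()
opt = DiscriminatorNew(n_h=4).cuda()
opt.load_state_dict(ref.state_dict())  # copy the xavier-initialized f_k_bilinear

c, h_pl, h_mi = (t.cuda() for t in get_inputs())
torch.testing.assert_close(ref(c, h_pl, h_mi), opt(c, h_pl, h_mi))
# both return logits of shape (8, 4, 4, 1): sc_1 stacked on sc_2 along dim 0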
SpatialPyramidPooling
import torch import torch.nn as nn class SpatialPyramidPooling(nn.Module): def __init__(self, pool_sizes=[5, 9, 13]): super(SpatialPyramidPooling, self).__init__() self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size // 2) for pool_size in pool_sizes]) def forward(self, x): features = [maxpool(x) for maxpool in self.maxpools[::-1]] features = torch.cat(features + [x], dim=1) return features def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x7 = xindex x3 = xindex // 64 x4 = xindex % 64 tmp116 = tl.load(in_ptr0 + x7, xmask) tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -2 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-10 + x7), tmp10 & xmask, other=float('-inf')) tmp12 = -1 + x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-9 + x7), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-8 + x7), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 1 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-7 + x7), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = 2 + x0 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp5 & tmp36 tmp38 = tl.load(in_ptr0 + (-6 + x7), tmp37 & xmask, other=float('-inf')) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = -1 + x1 tmp41 = tmp40 >= tmp1 tmp42 = tmp40 < tmp3 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp9 tmp45 = tl.load(in_ptr0 + (-6 + x7), tmp44 & xmask, other=float('-inf')) tmp46 = triton_helpers.maximum(tmp45, tmp39) tmp47 = tmp43 & tmp15 tmp48 = tl.load(in_ptr0 + (-5 + x7), tmp47 & xmask, other=float('-inf')) tmp49 = triton_helpers.maximum(tmp48, tmp46) tmp50 = tmp43 & tmp22 tmp51 = tl.load(in_ptr0 + (-4 + x7), tmp50 & xmask, other=float('-inf')) tmp52 = triton_helpers.maximum(tmp51, tmp49) tmp53 = tmp43 & tmp29 tmp54 = tl.load(in_ptr0 + (-3 + x7), tmp53 & xmask, other=float('-inf')) tmp55 = triton_helpers.maximum(tmp54, tmp52) tmp56 = tmp43 & tmp36 tmp57 = tl.load(in_ptr0 + (-2 + x7), tmp56 & xmask, other=float('-inf')) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = x1 tmp60 = tmp59 >= tmp1 tmp61 = tmp59 < tmp3 tmp62 = tmp60 & tmp61 tmp63 = tmp62 & tmp9 tmp64 = tl.load(in_ptr0 + (-2 + x7), tmp63 & xmask, other=float('-inf')) tmp65 = triton_helpers.maximum(tmp64, tmp58) tmp66 = tmp62 & tmp15 tmp67 = tl.load(in_ptr0 + (-1 + x7), tmp66 & xmask, other=float('-inf')) tmp68 = triton_helpers.maximum(tmp67, tmp65) tmp69 = tmp62 & tmp22 tmp70 = tl.load(in_ptr0 + x7, tmp69 & xmask, other=float('-inf')) tmp71 = triton_helpers.maximum(tmp70, tmp68) tmp72 = tmp62 & tmp29 tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float('-inf')) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp62 & tmp36 tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float('-inf')) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = 1 + x1 tmp79 
= tmp78 >= tmp1 tmp80 = tmp78 < tmp3 tmp81 = tmp79 & tmp80 tmp82 = tmp81 & tmp9 tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float('-inf')) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp81 & tmp15 tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float('-inf')) tmp87 = triton_helpers.maximum(tmp86, tmp84) tmp88 = tmp81 & tmp22 tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, other=float('-inf')) tmp90 = triton_helpers.maximum(tmp89, tmp87) tmp91 = tmp81 & tmp29 tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float('-inf')) tmp93 = triton_helpers.maximum(tmp92, tmp90) tmp94 = tmp81 & tmp36 tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float('-inf')) tmp96 = triton_helpers.maximum(tmp95, tmp93) tmp97 = 2 + x1 tmp98 = tmp97 >= tmp1 tmp99 = tmp97 < tmp3 tmp100 = tmp98 & tmp99 tmp101 = tmp100 & tmp9 tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float('-inf')) tmp103 = triton_helpers.maximum(tmp102, tmp96) tmp104 = tmp100 & tmp15 tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float('-inf')) tmp106 = triton_helpers.maximum(tmp105, tmp103) tmp107 = tmp100 & tmp22 tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float('-inf')) tmp109 = triton_helpers.maximum(tmp108, tmp106) tmp110 = tmp100 & tmp29 tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float('-inf')) tmp112 = triton_helpers.maximum(tmp111, tmp109) tmp113 = tmp100 & tmp36 tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float('-inf')) tmp115 = triton_helpers.maximum(tmp114, tmp112) tl.store(out_ptr0 + (x4 + 256 * x3), tmp115, xmask) tl.store(out_ptr1 + (x4 + 256 * x3), tmp116, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 256 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13, 13], [1, 1], [6, 6]) buf1 = buf0[0] del buf0 buf3 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9 ], [1, 1], [4, 4]) buf4 = buf3[0] del buf3 buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. float32) buf6 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 128) buf9 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 192) get_raw_stream(0) triton_poi_fused_cat_max_pool2d_with_indices_0[grid(256)](arg0_1, buf6, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf7 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](buf1, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf8 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 64) triton_poi_fused_cat_1[grid(256)](buf4, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 return buf10, class SpatialPyramidPoolingNew(nn.Module): def __init__(self, pool_sizes=[5, 9, 13]): super(SpatialPyramidPoolingNew, self).__init__() self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size // 2) for pool_size in pool_sizes]) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BDeMo/yolov4-pytorch
SpatialPyramidPooling
false
2009
[ "MIT" ]
0
2434afc88d0890bdb19c5655bb7c577d22bf18d3
https://github.com/BDeMo/yolov4-pytorch/tree/2434afc88d0890bdb19c5655bb7c577d22bf18d3
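SPP concatenates the three stride-1 max-pooled maps with the identity branch along channels, so the channel count grows by a factor of len(pool_sizes) + 1 while the spatial size is preserved by the pool_size // 2 padding; a shape sketch assuming CUDA:

import torch

spp = SpatialPyramidPoolingNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
print(spp(x).shape)  # torch.Size([4, 16, 4, 4]): 4 channels * (3 pool sizes + identity)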
RelationCrossing
import torch import torch.nn as nn import torch.nn.functional as F class RelationCrossing(nn.Module): def __init__(self, in_feats: 'int', out_feats: 'int', num_heads: 'int', dropout: 'float'=0.0, negative_slope: 'float'=0.2): """ Relation crossing layer Parameters ---------- in_feats : pair of ints, input feature size out_feats : int, output feature size num_heads : int, number of heads in Multi-Head Attention dropout : float, optional, dropout rate, defaults: 0.0 negative_slope : float, optional, negative slope rate, defaults: 0.2 """ super(RelationCrossing, self).__init__() self._in_feats = in_feats self._out_feats = out_feats self._num_heads = num_heads self.dropout = nn.Dropout(dropout) self.leaky_relu = nn.LeakyReLU(negative_slope) def forward(self, dsttype_node_features: 'torch.Tensor', relations_crossing_attention_weight: 'nn.Parameter'): """ :param dsttype_node_features: a tensor of (dsttype_node_relations_num, num_dst_nodes, n_heads * hidden_dim) :param relations_crossing_attention_weight: Parameter the shape is (n_heads, hidden_dim) :return: output_features: a Tensor """ if len(dsttype_node_features) == 1: dsttype_node_features = dsttype_node_features.squeeze(dim=0) else: dsttype_node_features = dsttype_node_features.reshape( dsttype_node_features.shape[0], -1, self._num_heads, self. _out_feats) dsttype_node_relation_attention = (dsttype_node_features * relations_crossing_attention_weight).sum(dim=-1, keepdim=True) dsttype_node_relation_attention = F.softmax(self.leaky_relu( dsttype_node_relation_attention), dim=0) dsttype_node_features = (dsttype_node_features * dsttype_node_relation_attention).sum(dim=0) dsttype_node_features = self.dropout(dsttype_node_features) dsttype_node_features = dsttype_node_features.reshape(-1, self. _num_heads * self._out_feats) return dsttype_node_features def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_feats': 4, 'out_feats': 4, 'num_heads': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp11 = tmp10 > tmp1 tmp12 = tmp10 * tmp3 tmp13 = tl.where(tmp11, tmp10, tmp12) tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 > tmp1 tmp17 = tmp15 * tmp3 tmp18 = tl.where(tmp16, tmp15, tmp17) tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp21 = tmp20 > tmp1 tmp22 = tmp20 * tmp3 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = triton_helpers.maximum(tmp19, tmp23) tmp25 = tmp5 - tmp24 tmp26 = tl_math.exp(tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x2), xmask) tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x2), xmask) tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x2), xmask) tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_leaky_relu_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused__softmax_mul_sum_3[grid(64)](arg0_1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf2 return reinterpret_tensor(buf3, (4, 16), (16, 1), 0), class RelationCrossingNew(nn.Module): def __init__(self, in_feats: 'int', out_feats: 'int', num_heads: 'int', dropout: 'float'=0.0, negative_slope: 'float'=0.2): """ Relation crossing layer Parameters ---------- in_feats : pair of ints, input feature size out_feats : int, output feature size num_heads : int, number of heads in Multi-Head Attention dropout : float, optional, dropout rate, defaults: 0.0 negative_slope : float, optional, negative slope rate, defaults: 0.2 """ super(RelationCrossingNew, self).__init__() self._in_feats = in_feats self._out_feats = out_feats self._num_heads = num_heads self.dropout = nn.Dropout(dropout) self.leaky_relu = nn.LeakyReLU(negative_slope) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
BUPTlfq/OpenHGNN
RelationCrossing
false
2010
[ "Apache-2.0" ]
0
77041e68c33a8a42a2c187c6e42d85b81cbb25d3
https://github.com/BUPTlfq/OpenHGNN/tree/77041e68c33a8a42a2c187c6e42d85b81cbb25d3
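With dropout at its 0.0 default the layer is deterministic and parameter-free, so the compiled RelationCrossingNew can be compared against the eager forward directly; a sketch assuming CUDA and the definitions above:

import torch

init_args, init_kwargs = get_init_inputs()
ref = RelationCrossing(*init_args, **init_kwargs).cuda()
opt = RelationCrossingNew(*init_args, **init_kwargs).cuda()

feats, attn_weight = (t.cuda() for t in get_inputs())
torch.testing.assert_close(ref(feats, attn_weight), opt(feats, attn_weight))
# both flatten to (-1, num_heads * out_feats), here (4, 16)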
LayerNorm
import torch import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, *args): super().__init__() def forward(self, activation): if len(activation.size()) == 3: ori_size = activation.size() activation = activation.view(-1, activation.size(-1)) else: ori_size = None means = torch.mean(activation, dim=1, keepdim=True) stds = torch.std(activation, dim=1, keepdim=True) activation = (activation - means) / stds if ori_size is not None: activation = activation.view(ori_size) return activation def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp10 / tmp24 tl.store(out_ptr0 + x3, tmp25, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LayerNormNew(nn.Module): def __init__(self, *args): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BaiYuhaoSpiceeYJ/SEGAN_denoise
LayerNorm
false
2011
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
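This LayerNorm variant standardizes over dim=1 with the unbiased standard deviation (hence the divide-by-3.0 for four elements inside the kernel) and carries no affine parameters; a cross-check against the eager formula, assuming CUDA:

import torch

ln = LayerNormNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
expected = (x - x.mean(dim=1, keepdim=True)) / x.std(dim=1, keepdim=True)
torch.testing.assert_close(ln(x), expected)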
FeatExemplarAvgBlock
import torch import torch.nn as nn import torch.optim import torch.nn.parallel class FeatExemplarAvgBlock(nn.Module): def __init__(self, nFeat): super(FeatExemplarAvgBlock, self).__init__() def forward(self, features_train, labels_train): labels_train_transposed = labels_train.transpose(1, 2) weight_novel = torch.bmm(labels_train_transposed, features_train) weight_novel = weight_novel.div(labels_train_transposed.sum(dim=2, keepdim=True).expand_as(weight_novel)) return weight_novel def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'nFeat': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + x3, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), arg1_1, out=buf0) del arg1_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf1, class FeatExemplarAvgBlockNew(nn.Module): def __init__(self, nFeat): super(FeatExemplarAvgBlockNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Basasuya/FewShotWithoutForgetting
FeatExemplarAvgBlock
false
2012
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
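The block forms per-class average exemplars, weight_novel = (Y^T F) / (Y^T 1): each class weight is the label-weighted mean of the training features. Note that inside call() it is the first argument that gets transposed and row-summed, i.e. it takes the labels_train role. A sketch against the closed form, assuming CUDA:

import torch

blk = FeatExemplarAvgBlockNew(nFeat=4).cuda()
labels = torch.rand(4, 4, 4, device='cuda')  # (B, nTrain, nClasses)
feats = torch.rand(4, 4, 4, device='cuda')   # (B, nTrain, nFeat)
w = blk(labels, feats)                       # first argument is the one transposed in call()
yt = labels.transpose(1, 2)
torch.testing.assert_close(w, torch.bmm(yt, feats) / yt.sum(2, keepdim=True))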
CombFilter
import torch import torch.nn as nn import torch.nn.functional as F class CombFilter(nn.Module): def __init__(self, ninputs, fmaps, L): super().__init__() self.L = L self.filt = nn.Conv1d(ninputs, fmaps, 2, dilation=L, bias=False) r_init_weight = torch.ones(ninputs * fmaps, 2) r_init_weight[:, 0] = torch.rand(r_init_weight.size(0)) self.filt.weight.data = r_init_weight.view(fmaps, ninputs, 2) def forward(self, x): x_p = F.pad(x, (self.L, 0)) y = self.filt(x_p) return y def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'ninputs': 4, 'fmaps': 4, 'L': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = -4 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 2), (8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 8 ), (0, 8, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(4,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (1, 4, 8), (32, 8, 1), 0) class CombFilterNew(nn.Module): def __init__(self, ninputs, fmaps, L): super().__init__() self.L = L self.filt = nn.Conv1d(ninputs, fmaps, 2, dilation=L, bias=False) r_init_weight = torch.ones(ninputs * fmaps, 2) r_init_weight[:, 0] = torch.rand(r_init_weight.size(0)) self.filt.weight.data = r_init_weight.view(fmaps, ninputs, 2) def forward(self, input_0): primals_2 = self.filt.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
BaiYuhaoSpiceeYJ/SEGAN_denoise
CombFilter
false
2013
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
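CombFilter left-pads by L and applies a two-tap dilated convolution, y[t] = w0 * x[t - L] + w1 * x[t] summed over input channels, so the time length is preserved; a usage sketch assuming CUDA (the wrapper treats the 2D input as an unbatched (C, T) signal):

import torch

comb = CombFilterNew(ninputs=4, fmaps=4, L=4).cuda()
x = torch.rand(4, 4, device='cuda')  # (C=4, T=4), as in get_inputs()
print(comb(x).shape)  # torch.Size([4, 4]): padding by L exactly offsets the dilated taps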
GatedLinear
import torch from torch import nn from torch.nn import init as init class GatedLinear(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.lin1 = nn.Linear(in_ch, out_ch) self.lin2 = nn.Linear(in_ch, out_ch) self.sig = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, x): return self.tanh(self.lin1(x)) * self.sig(self.lin2(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 class GatedLinearNew(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.lin1 = nn.Linear(in_ch, out_ch) self.lin2 = nn.Linear(in_ch, out_ch) self.sig = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BaekduChoi/Halftoning_v2
GatedLinear
false
2014
[ "BSD-3-Clause" ]
0
fdb7040e1a4044f23ef9c92757bbb90c23685afe
https://github.com/BaekduChoi/Halftoning_v2/tree/fdb7040e1a4044f23ef9c92757bbb90c23685afe
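Both addmm projections stay on cuBLAS and only the tanh(a) * sigmoid(b) gate is fused into a single kernel; a parity sketch against the eager module, assuming CUDA and the classes above:

import torch

ref = GatedLinear(in_ch=4, out_ch=4).cuda()
opt = GatedLinearNew(in_ch=4, out_ch=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x))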
AttentionPool2d
import torch import torch.nn.functional as F from torch import nn class AttentionPool2d(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute( 2, 0, 1) x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) x = x + self.positional_embedding[:, None, :] x, _ = F.multi_head_attention_forward(query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj. weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn= False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False) return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 17, tl.int64) tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = 4 + y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 272 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 68 x3 = xindex // 68 tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = float('-inf') tmp12 = tmp0 == tmp11 tmp13 = tmp12 == 0 tmp14 = tmp13.to(tl.int64) tmp15 = tmp14 != 0 
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = triton_helpers.any(tmp18, 1)[:, None] tmp20 = tmp19 == 0 tmp21 = tmp6 / tmp10 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp22, tmp21) tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask) @triton.jit def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4624 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 289 x1 = xindex // 289 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 17 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (17, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2, buf1, 272, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((12,), (1,), torch.float32) triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta =1, out=buf5) del buf4 buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32) buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32) triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0) del buf2 buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32) triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6, primals_7, 
primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) del buf3 del primals_6 del primals_7 del primals_8 buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8) buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32) triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17, XBLOCK=1, num_warps=2, num_stages=1) buf13 = buf8 del buf8 triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0) del buf7 extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14) del buf13 buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK =16, YBLOCK=32, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0) del buf14 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor( buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3 class AttentionPool2dNew(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, input_0): primals_2 = self.positional_embedding primals_3 = self.k_proj.weight primals_6 = self.k_proj.bias primals_4 = self.q_proj.weight primals_7 = self.q_proj.bias primals_5 = self.v_proj.weight primals_8 = self.v_proj.bias primals_9 = self.c_proj.weight primals_10 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
Artanic30/RentalPrediction
AttentionPool2d
false
2015
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
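AttentionPool2dNew re-plumbs F.multi_head_attention_forward through hand-fused kernels (mean-token prepend, positional add, per-head softmax), so a parity check needs shared weights; a sketch assuming CUDA, with tolerances loosened slightly since the reduction order differs from the eager path:

import torch

ref = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads=4).cuda()
opt = AttentionPool2dNew(spacial_dim=4, embed_dim=4, num_heads=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-5)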
GDeconv1DBlock
import torch import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class GDeconv1DBlock(nn.Module): def __init__(self, ninp, fmaps, kwidth, stride=4, bias=True, norm_type= None, act=None): super().__init__() pad = max(0, (stride - kwidth) // -2) self.deconv = nn.ConvTranspose1d(ninp, fmaps, kwidth, stride=stride, padding=pad) self.norm = build_norm_layer(norm_type, self.deconv, fmaps) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.kwidth = kwidth self.stride = stride def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, x): h = self.deconv(x) if self.kwidth % 2 != 0: h = h[:, :, :-1] h = self.forward_norm(h, self.norm) h = self.act(h) return h def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'fmaps': 4, 'kwidth': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,), padding=(0,), dilation=(1,), transposed=True, output_padding=(0 ,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class GDeconv1DBlockNew(nn.Module): def __init__(self, ninp, fmaps, kwidth, stride=4, bias=True, norm_type= None, act=None): super().__init__() pad = max(0, (stride - kwidth) // -2) self.deconv = nn.ConvTranspose1d(ninp, fmaps, kwidth, stride=stride, padding=pad) self.norm = build_norm_layer(norm_type, self.deconv, fmaps) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.kwidth = kwidth self.stride = stride def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, input_0): primals_1 = self.deconv.weight primals_2 = self.deconv.bias primals_4 = self.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
BaiYuhaoSpiceeYJ/SEGAN_denoise
GDeconv1DBlock
false
2,016
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
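A minimal equivalence check for the GDeconv1DBlock pair above, assuming a CUDA device; the shapes and init arguments come from the row's get_inputs()/get_init_inputs(), while the names ref/opt and the tolerances are ours.

import torch

ref = GDeconv1DBlock(ninp=4, fmaps=4, kwidth=4).cuda()     # eager reference
opt = GDeconv1DBlockNew(ninp=4, fmaps=4, kwidth=4).cuda()  # Triton-backed twin
opt.load_state_dict(ref.state_dict())                      # share one random init
x = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)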
GLU
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel class GLU(nn.Module): def __init__(self): super(GLU, self).__init__() def forward(self, x): nc = x.size(1) assert nc % 2 == 0, 'channels dont divide 2!' nc = int(nc / 2) return x[:, :nc] * torch.sigmoid(x[:, nc:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(128)](arg0_1, buf0, 128, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GLUNew(nn.Module): def __init__(self): super(GLUNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BedirYilmaz/picturate-mwml
GLU
false
2,017
[ "MIT" ]
0
e0dd1bb9df0e0ee5a9cbefba9ac7ada19a2cc41c
https://github.com/BedirYilmaz/picturate-mwml/tree/e0dd1bb9df0e0ee5a9cbefba9ac7ada19a2cc41c
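The GLU pair above is parameter-free, so checking the fused sigmoid-gate kernel against eager PyTorch only needs an input; a sketch assuming a CUDA device:

import torch

ref, opt = GLU().cuda(), GLUNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')   # channel dim 4 is split in half
torch.testing.assert_close(opt(x), ref(x))  # both compute x[:, :2] * sigmoid(x[:, 2:])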
MLP
import torch import torch.nn import torch.nn as nn import torch.nn.functional as F class MLP(nn.Module): """ This is just an MLP with 1 hidden layer """ def __init__(self, n_units, dropout=0.1): super(MLP, self).__init__() self.w_1 = nn.Linear(n_units, 2048) self.w_2 = nn.Linear(2048, n_units) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_units': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4), (4, 1)) assert_size_stride(primals_2, (2048,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1, primals_2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), primals_4, buf3 class MLPNew(nn.Module): """ This is just an MLP with 1 hidden layer """ def __init__(self, n_units, dropout=0.1): super(MLPNew, self).__init__() self.w_1 = nn.Linear(n_units, 2048) self.w_2 = nn.Linear(2048, n_units) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.w_1.weight primals_2 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AmineBellahsen/IFT6135_representation_learning
MLP
false
2,018
[ "MIT" ]
0
d93865a2e1d7b42d4808927ce928dc875a436730
https://github.com/AmineBellahsen/IFT6135_representation_learning/tree/d93865a2e1d7b42d4808927ce928dc875a436730
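For the MLP pair above, note that the compiled graph contains no dropout op, so an eager comparison only holds in eval mode; a sketch under that assumption (CUDA device, our names and tolerances):

import torch

ref = MLP(n_units=4).cuda().eval()     # eval() makes nn.Dropout the identity
opt = MLPNew(n_units=4).cuda().eval()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)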
EncoderImageWeightNormPrecomp
import torch from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImageWeightNormPrecomp(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecomp, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters. overwriting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecomp, self).load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'img_dim': 4, 'embed_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_pow_sqrt_sum_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4 , (64, 4), (4, 1), 0), buf3 def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class 
EncoderImageWeightNormPrecompNew(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def load_state_dict(self, state_dict): """Copies parameters. overwriting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecompNew, self).load_state_dict(new_state ) def forward(self, input_0): primals_3 = self.fc.bias primals_1 = self.fc.weight_g primals_2 = self.fc.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Ballester/SCAN
EncoderImageWeightNormPrecomp
false
2,019
[ "Apache-2.0" ]
0
4a003f60d3e45e5dd16969745e4b182fe705e758
https://github.com/Ballester/SCAN/tree/4a003f60d3e45e5dd16969745e4b182fe705e758
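A sketch for the weight-norm encoder pair above (assumes a CUDA device; ref/opt are our names). Both classes keep the overridden load_state_dict, so state can be copied across directly:

import torch

ref = EncoderImageWeightNormPrecomp(img_dim=4, embed_size=4).cuda()
opt = EncoderImageWeightNormPrecompNew(img_dim=4, embed_size=4).cuda()
opt.load_state_dict(ref.state_dict())   # copies fc.weight_g, fc.weight_v, fc.bias
images = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(images), ref(images), rtol=1e-4, atol=1e-5)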
EncoderImagePrecomp
import torch import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.init def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImagePrecomp(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImagePrecomp, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = nn.Linear(img_dim, embed_size) self.init_weights() def init_weights(self): """Xavier initialization for the fully connected layer """ r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features) self.fc.weight.data.uniform_(-r, r) self.fc.bias.data.fill_(0) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters. overwriting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImagePrecomp, self).load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'img_dim': 4, 'embed_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sqrt_sum_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImagePrecompNew(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImagePrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = nn.Linear(img_dim, embed_size) self.init_weights() def init_weights(self): """Xavier initialization for the fully connected layer """ r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features) self.fc.weight.data.uniform_(-r, r) self.fc.bias.data.fill_(0) def load_state_dict(self, state_dict): """Copies parameters. overwriting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImagePrecompNew, self).load_state_dict(new_state) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Ballester/SCAN
EncoderImagePrecomp
false
2,020
[ "Apache-2.0" ]
0
4a003f60d3e45e5dd16969745e4b182fe705e758
https://github.com/Ballester/SCAN/tree/4a003f60d3e45e5dd16969745e4b182fe705e758
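The plain (non-weight-norm) encoder pair above can be exercised the same way; a hedged sketch assuming a CUDA device:

import torch

ref = EncoderImagePrecomp(img_dim=4, embed_size=4).cuda()
opt = EncoderImagePrecompNew(img_dim=4, embed_size=4).cuda()
opt.load_state_dict(ref.state_dict())   # both use the same Xavier init scheme, but draws differ
images = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(images), ref(images), rtol=1e-4, atol=1e-5)  # rows come out l2-normalized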
SimpleMLP
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class SimpleMLP(nn.Module): def __init__(self): super(SimpleMLP, self).__init__() self.l1 = nn.Linear(4, 16) self.l2 = nn.Linear(16, 16) self.l3 = nn.Linear(16, 3) def forward(self, x): x = F.relu(self.l1(x)) x = F.relu(self.l2(x)) x = F.softmax(self.l3(x), dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = xindex // 48 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = xindex // 48 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (3, 16), (16, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 
4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf8, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3, primals_5, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 3), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) triton_poi_fused__softmax_1[grid(192)](buf4, buf5, 192, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 3), (48, 12, 3, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(192)](buf5, buf6, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor( buf3, (64, 16), (16, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class SimpleMLPNew(nn.Module): def __init__(self): super(SimpleMLPNew, self).__init__() self.l1 = nn.Linear(4, 16) self.l2 = nn.Linear(16, 16) self.l3 = nn.Linear(16, 3) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Ali-ry/azureml-examples
SimpleMLP
false
2,021
[ "MIT" ]
0
817ae89d2766dcafd70937a22cb3a80f100a2906
https://github.com/Ali-ry/azureml-examples/tree/817ae89d2766dcafd70937a22cb3a80f100a2906
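A sketch for the SimpleMLP record above (assumes a CUDA device; there is no dropout here, so no eval() caveat):

import torch

ref, opt = SimpleMLP().cuda(), SimpleMLPNew().cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
out = opt(x)
torch.testing.assert_close(out, ref(x), rtol=1e-4, atol=1e-5)
print(out.sum(dim=1))   # softmax is taken over dim=1, so these sums are all ~1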
TVLoss
import torch import torch.nn as nn import torch.nn.parallel class TVLoss(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLoss, self).__init__() self.tv_loss_weight = tv_loss_weight def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = self.tensor_size(x[:, :, :, 1:]) h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum() w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum() return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w ) / batch_size @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 12 r1 = rindex // 12 r2 = rindex % 3 r3 = rindex // 3 tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 0.020833333333333332 tmp17 = tmp7 * tmp16 tmp18 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tmp20 = 2.0 tmp21 = tmp19 * tmp20 tmp22 = 0.25 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class TVLossNew(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLossNew, self).__init__() self.tv_loss_weight = tv_loss_weight @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Blatts01/VckImageRestoration
TVLoss
false
2,022
[ "MIT" ]
0
ae4e2221d9d4e236a08722cb92ac5cc88947e311
https://github.com/Blatts01/VckImageRestoration/tree/ae4e2221d9d4e236a08722cb92ac5cc88947e311
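One caveat worth showing for the TVLoss pair above: the fused kernel bakes the default tv_loss_weight=1 and the 4x4x4x4 input shape into its constants (the 1/48 count scaling and the 1/4 batch divisor), so a sketch should stick to those defaults (assumes a CUDA device):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(TVLossNew()(x), TVLoss()(x))   # scalar total-variation penalty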
Linear3D
import math import torch import torch as th from torch.nn import Parameter def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3D(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3D, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj_matrix=None, permutation_matrix=None): input_ = [input] if input.dim() == 2: if permutation_matrix is not None: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, permutation_matrix.shape[1]])) elif hasattr(self, 'noise'): input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features - 1])) else: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features])) if adj_matrix is not None and permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ (adj_matrix.t(). unsqueeze(2) * permutation_matrix)).transpose(0, 1)) elif adj_matrix is not None: input_.append(input_[-1] * adj_matrix.t().unsqueeze(0)) elif permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ permutation_matrix).t() ) if hasattr(self, 'noise'): self.noise.normal_() input_.append(th.cat([input_[-1], self.noise], 2)) return functional_linear3d(input_[-1], self.weight, self.bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch as th from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_2, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_2[grid(256)](buf3, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0) def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3DNew(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. 
Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3DNew, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
BadrYoubiIdrissi/CausalDiscoveryToolbox
Linear3D
false
2,023
[ "MIT" ]
0
1e729d002a64ea1942caecd21b9dc8cc217ea0e2
https://github.com/BadrYoubiIdrissi/CausalDiscoveryToolbox/tree/1e729d002a64ea1942caecd21b9dc8cc217ea0e2
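A sketch for the Linear3D pair above. The compiled forward only covers the plain call (no adj_matrix, no permutation_matrix, no noise), which is also what the row's get_inputs() exercises; CUDA device and names are our assumptions:

import torch

ref = Linear3D(channels=4, in_features=4, out_features=4).cuda()
opt = Linear3DNew(channels=4, in_features=4, out_features=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)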
FullyConnected
import torch import torch.utils.data import torch.nn as nn def _init_weights(layer): """ Init weights of the layer :param layer: :return: """ nn.init.xavier_uniform_(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) class FullyConnected(nn.Module): def __init__(self, in_features, out_features, activation_fn=nn. functional.relu): super().__init__() self.fc = nn.Linear(in_features, out_features) _init_weights(self.fc) self.activation = activation_fn def forward(self, input): out = self.fc(input) if self.activation is not None: out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 def _init_weights(layer): """ Init weights of the layer :param layer: :return: """ nn.init.xavier_uniform_(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) class FullyConnectedNew(nn.Module): def __init__(self, in_features, out_features, activation_fn=nn. functional.relu): super().__init__() self.fc = nn.Linear(in_features, out_features) _init_weights(self.fc) self.activation = activation_fn def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AntoBcc/benchmarking-gnns
FullyConnected
false
2,024
[ "MIT" ]
0
c5750054b2f4ba0822f203fa18d382f6a3b16542
https://github.com/AntoBcc/benchmarking-gnns/tree/c5750054b2f4ba0822f203fa18d382f6a3b16542
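The FullyConnected pair above follows the same recipe; a minimal sketch assuming a CUDA device:

import torch

ref = FullyConnected(in_features=4, out_features=4).cuda()
opt = FullyConnectedNew(in_features=4, out_features=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)  # ReLU(x @ W.T + b)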
ResARModule
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.spectral_norm import spectral_norm def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class ResARModule(nn.Module): def __init__(self, ninp, fmaps, res_fmaps, kwidth, dilation, bias=True, norm_type=None, act=None): super().__init__() self.dil_conv = nn.Conv1d(ninp, fmaps, kwidth, dilation=dilation, bias=bias) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.dil_norm = build_norm_layer(norm_type, self.dil_conv, fmaps) self.kwidth = kwidth self.dilation = dilation self.conv_1x1_skip = nn.Conv1d(fmaps, ninp, 1, bias=bias) self.conv_1x1_skip_norm = build_norm_layer(norm_type, self. conv_1x1_skip, ninp) self.conv_1x1_res = nn.Conv1d(fmaps, res_fmaps, 1, bias=bias) self.conv_1x1_res_norm = build_norm_layer(norm_type, self. conv_1x1_res, res_fmaps) def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, x): kw__1 = self.kwidth - 1 P = kw__1 + kw__1 * (self.dilation - 1) x_p = F.pad(x, (P, 0)) h = self.dil_conv(x_p) h = self.forward_norm(h, self.dil_norm) h = self.act(h) a = h h = self.conv_1x1_skip(h) h = self.forward_norm(h, self.conv_1x1_skip_norm) y = x + h sh = self.conv_1x1_res(a) sh = self.forward_norm(sh, self.conv_1x1_res_norm) return y, sh def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'fmaps': 4, 'res_fmaps': 4, 'kwidth': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 x2 = xindex tmp0 = -3 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 7), (7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(28)](primals_1, buf0, 28, XBLOCK=32, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 7 ), (0, 7, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
triton_poi_fused__prelu_kernel_convolution_1[grid(16)](buf2, primals_3, primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4 ), (0, 4, 1), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 4), (16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused_add_2[grid(16)](buf5, primals_1, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_6 buf6 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4 ), (0, 4, 1), 0), primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf6, (1, 4, 4), (16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_3[grid(16)](buf7, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 return buf5, reinterpret_tensor(buf7, (4, 4), (4, 1), 0 ), primals_2, primals_4, primals_5, primals_7, reinterpret_tensor(buf0, (1, 4, 7), (28, 7, 1), 0), buf2, reinterpret_tensor(buf3, (1, 4, 4), (16, 4, 1), 0) def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class ResARModuleNew(nn.Module): def __init__(self, ninp, fmaps, res_fmaps, kwidth, dilation, bias=True, norm_type=None, act=None): super().__init__() self.dil_conv = nn.Conv1d(ninp, fmaps, kwidth, dilation=dilation, bias=bias) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.dil_norm = build_norm_layer(norm_type, self.dil_conv, fmaps) self.kwidth = kwidth self.dilation = dilation self.conv_1x1_skip = nn.Conv1d(fmaps, ninp, 1, bias=bias) self.conv_1x1_skip_norm = build_norm_layer(norm_type, self. conv_1x1_skip, ninp) self.conv_1x1_res = nn.Conv1d(fmaps, res_fmaps, 1, bias=bias) self.conv_1x1_res_norm = build_norm_layer(norm_type, self. conv_1x1_res, res_fmaps) def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, input_0): primals_2 = self.dil_conv.weight primals_3 = self.dil_conv.bias primals_4 = self.act.weight primals_5 = self.conv_1x1_skip.weight primals_6 = self.conv_1x1_skip.bias primals_7 = self.conv_1x1_res.weight primals_8 = self.conv_1x1_res.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
BaiYuhaoSpiceeYJ/SEGAN_denoise
ResARModule
false
2,025
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
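ResARModule returns a (skip, residual) pair, so a sketch checks both outputs; the 2-D (C, L) input mirrors the row's get_inputs() and relies on PyTorch's no-batch-dim Conv1d support (assumes a CUDA device):

import torch

ref = ResARModule(ninp=4, fmaps=4, res_fmaps=4, kwidth=4, dilation=1).cuda()
opt = ResARModuleNew(ninp=4, fmaps=4, res_fmaps=4, kwidth=4, dilation=1).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, device='cuda')
y_ref, sh_ref = ref(x)
y_opt, sh_opt = opt(x)
torch.testing.assert_close(y_opt, y_ref, rtol=1e-4, atol=1e-5)
torch.testing.assert_close(sh_opt, sh_ref, rtol=1e-4, atol=1e-5)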
CharbonnierLoss
import torch import torch.nn as nn import torch.nn.parallel class CharbonnierLoss(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=0.001): super(CharbonnierLoss, self).__init__() self.eps = eps def forward(self, x, y): diff = x - y loss = torch.mean(torch.sqrt(diff * diff + self.eps * self.eps)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1e-06 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_mul_sqrt_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class CharbonnierLossNew(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=0.001): super(CharbonnierLossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Blatts01/VckImageRestoration
CharbonnierLoss
false
2,026
[ "MIT" ]
0
ae4e2221d9d4e236a08722cb92ac5cc88947e311
https://github.com/Blatts01/VckImageRestoration/tree/ae4e2221d9d4e236a08722cb92ac5cc88947e311
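For the CharbonnierLoss pair above, the kernel hard-codes eps**2 = 1e-06 (the default eps=0.001) and the 256-element mean, so this sketch keeps the default construction and input shape (assumes a CUDA device):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(CharbonnierLossNew()(x, y), CharbonnierLoss()(x, y))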
SepConv2d
import torch import torch.nn as nn import torch.nn.parallel class SepConv2d(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, act_layer=nn.ReLU): super(SepConv2d, self).__init__() self.depthwise = torch.nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels) self.pointwise = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1) self.act_layer = act_layer() if act_layer is not None else nn.Identity( ) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride def forward(self, x): x = self.depthwise(x) x = self.act_layer(x) x = self.pointwise(x) return x def flops(self, H, W): flops = 0 flops += (H * W * self.in_channels * self.kernel_size ** 2 / self. stride ** 2) flops += H * W * self.in_channels * self.out_channels return flops def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class SepConv2dNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, act_layer=nn.ReLU): super(SepConv2dNew, self).__init__() self.depthwise = torch.nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels) self.pointwise = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1) self.act_layer = act_layer() if act_layer is not None else nn.Identity( ) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride def flops(self, H, W): flops = 0 flops += (H * W * self.in_channels * self.kernel_size ** 2 / self. stride ** 2) flops += H * W * self.in_channels * self.out_channels return flops def forward(self, input_0): primals_1 = self.depthwise.weight primals_2 = self.depthwise.bias primals_4 = self.pointwise.weight primals_5 = self.pointwise.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Blatts01/VckImageRestoration
SepConv2d
false
2,027
[ "MIT" ]
0
ae4e2221d9d4e236a08722cb92ac5cc88947e311
https://github.com/Blatts01/VckImageRestoration/tree/ae4e2221d9d4e236a08722cb92ac5cc88947e311
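Finally, a sketch for the SepConv2d pair above (assumes a CUDA device); with kernel_size=4 on a 4x4 map, the depthwise stage collapses the spatial dims:

import torch

ref = SepConv2d(in_channels=4, out_channels=4, kernel_size=4).cuda()
opt = SepConv2dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)   # output shape (4, 4, 1, 1)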
CNN
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): """ Convolutional Neural Network. """ def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 8 * 8 * 20) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=-1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 20 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 % 20 x2 = xindex // 400 x5 = xindex x4 = xindex // 8000 x6 = xindex % 8000 tmp0 = tl.load(in_ptr0 + (3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (60 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (61 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (62 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (120 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (121 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (122 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x5, tmp16, xmask) tl.store(out_ptr1 + (x6 + 8064 * x4), tmp41, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 35840 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 1280 x0 = xindex % 1280 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 25, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1280 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 28, tl.int64) tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 1280), (1280, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (10, 64), (64, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 20, 60, 60), (72320, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(288000)](buf0, primals_2, buf1, 288000, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 20, 20, 20), (8000, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 20, 20), (8064, 400, 20, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(32000)](buf1, buf2, buf3, 32000, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((28, 1280), (1280, 1), torch.float32) triton_poi_fused_2[grid(35840)](buf2, buf4, 35840, XBLOCK=512, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((28, 64), (64, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1280, 64), ( 1, 1280), 0), out=buf5) del buf4 buf6 = empty_strided_cuda((25, 64), (64, 1), torch.float32) triton_poi_fused_relu_3[grid(1600)](buf5, primals_5, buf6, 1600, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_5 buf7 = empty_strided_cuda((25, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_7 buf10 = empty_strided_cuda((25, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_4[grid(25)](buf7, buf10, 25, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf7 return buf10, primals_1, primals_3, buf1, buf3, reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), buf6, buf10, primals_6, primals_4 class CNNNew(nn.Module): """ Convolutional Neural Network. """ def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Balandat/Ax
CNN
false
2028
[ "MIT" ]
0
6c7556165291a5329744b5075d5f95d2dec18938
https://github.com/Balandat/Ax/tree/6c7556165291a5329744b5075d5f95d2dec18938
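A hedged equivalence check for this record, assuming CNN and CNNNew live in one module and a CUDA device is available; the atol is illustrative. Note the generated call() bakes in the traced (4, 1, 64, 64) input, which the hard-coded view(-1, 1280) maps to 25 rows.

import torch

torch.manual_seed(0)
eager = CNN().cuda()
fused = CNNNew().cuda()
fused.load_state_dict(eager.state_dict())    # run both paths with identical parameters
x = torch.rand(4, 1, 64, 64, device='cuda')  # shape from get_inputs()
with torch.no_grad():
    ref = eager(x)
    out = fused(x)
print(out.shape)                             # torch.Size([25, 10]) log-probabilities
print(torch.allclose(ref, out, atol=1e-5))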
Delta
import torch import torch.nn as nn from torchaudio import transforms class Delta(nn.Module): def __init__(self, order=2, **kwargs): super(Delta, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] for o in range(self.order): feat = feats[-1].transpose(0, 1).unsqueeze(0) delta = self.compute_delta(feat) feats.append(delta.squeeze(0).transpose(0, 1)) x = torch.cat(feats, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchaudio import transforms assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * (x1 // 16) + 64 * (x1 // 4 % 4) + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = -2 + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = xindex // 12 x1 = xindex // 12 % 4 x2 = xindex // 48 % 4 x3 = xindex // 192 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + 64 * x2 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (4 * x1 + 16 * x3 + 64 * x2 + (-8 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x5, tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0 del buf0 triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. float32) triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf2 del buf5 return buf6, class DeltaNew(nn.Module): def __init__(self, order=2, **kwargs): super(DeltaNew, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AyushExel/s3prl
Delta
false
2029
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
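A hedged check for the Delta record, assuming both classes are importable together, torchaudio is installed, and CUDA is available; Delta has no parameters, so no state syncing is needed.

import torch

eager = Delta()
fused = DeltaNew()
x = torch.rand(4, 4, 4, 4, device='cuda')   # shape from get_inputs()
with torch.no_grad():
    ref = eager(x)
    out = fused(x)
print(out.shape)                            # torch.Size([4, 4, 4, 12]): input plus order-1 and order-2 deltas
print(torch.allclose(ref, out, atol=1e-4))  # illustrative tolerance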
InnerProductDecoder
import torch import torch.utils.data class InnerProductDecoder(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward(self, z, edge_index, sigmoid=True): """Decodes the latent variables :obj:`z` into edge probabilities for the given node-pairs :obj:`edge_index`. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1) return torch.sigmoid(value) if sigmoid else value def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype =torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp7 = tl.load(in_ptr0 + (4 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + 4 * tmp4, xmask, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask, 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr1 + 4 * tmp10, xmask, eviction_policy='evict_last') tmp13 = tmp6 * tmp12 tmp14 = tl.load(in_ptr1 + (1 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tl.load(in_ptr1 + (2 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (2 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl.load(in_ptr1 + (3 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (3 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25.to(tl.float32) tmp27 = tl.sigmoid(tmp26) tl.store(out_ptr1 + x0, tmp27, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sigmoid_sum_0[grid(4)](arg0_1, arg1_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class InnerProductDecoderNew(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CFF-Dream/pytorch_geometric
InnerProductDecoder
false
2030
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
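A small sketch for this record, assuming CUDA. The graph was traced with the all-ones int64 samples from get_inputs(), so the check below sticks to those; the eager path needs floating-point features for sigmoid, hence the explicit cast.

import torch

z = torch.ones(4, 4, dtype=torch.int64, device='cuda')
edge_index = torch.ones(4, 4, dtype=torch.int64, device='cuda')
eager = InnerProductDecoder()
fused = InnerProductDecoderNew()
ref = eager(z.float(), edge_index)  # sigmoid of 4 ones summed: sigmoid(4) ~ 0.9820 per edge
out = fused(z, edge_index)
print(ref)
print(out)                          # both paths agree on the traced all-ones inputs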
DenseGraphConv
import math import torch from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseGraphConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super(DenseGraphConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = torch.matmul(out, self.weight) if self.aggr == 'mean': out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) elif self.aggr == 'max': out = out.max(dim=-1)[0] out = out + self.lin(x) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseGraphConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super(DenseGraphConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.lin.weight primals_5 = self.lin.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
CFF-Dream/pytorch_geometric
DenseGraphConv
false
2031
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
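A hedged equivalence sketch, assuming both classes share one module and CUDA is available; note the generated call() only covers the traced configuration (default aggr='add', no mask), and the tolerance is illustrative.

import torch

torch.manual_seed(0)
eager = DenseGraphConv(in_channels=4, out_channels=4).cuda()
fused = DenseGraphConvNew(in_channels=4, out_channels=4).cuda()
fused.load_state_dict(eager.state_dict())   # identical weight and lin parameters
x = torch.rand(4, 4, 4, 4, device='cuda')   # shapes from get_inputs()
adj = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x, adj), fused(x, adj), atol=1e-5))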
MultiHeadAttention
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data class MultiHeadAttention(nn.Module): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, block_length=None, proximal_bias= False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores * block_mask + -10000.0 * (1 - block_mask) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): 2 * self.window_size + 1 pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, commons .convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ 0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ length, 0]])) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf3, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = buf1 del buf1 triton_poi_fused_convolution_0[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = buf2 del buf2 triton_poi_fused_convolution_0[grid(64)](buf8, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_0[grid(64)](buf11, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class MultiHeadAttentionNew(nn.Module): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, block_length=None, proximal_bias= False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) nn.init.xavier_uniform_(self.conv_v.weight) def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores * block_mask + -10000.0 * (1 - block_mask) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): 2 * self.window_size + 1 pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, commons .convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ 0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ length, 0]])) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def forward(self, input_0, input_1): primals_1 = self.conv_q.weight primals_2 = self.conv_q.bias primals_4 = self.conv_k.weight primals_5 = self.conv_k.bias primals_7 = self.conv_v.weight primals_8 = self.conv_v.bias primals_9 = self.conv_o.weight primals_10 = self.conv_o.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
AndreHe02/glow-tts
MultiHeadAttention
false
2032
[ "MIT" ]
0
683f68f17790f2f46c23e9d3eadbcac352d82e2b
https://github.com/AndreHe02/glow-tts/tree/683f68f17790f2f46c23e9d3eadbcac352d82e2b
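A hedged check for this record, assuming CUDA. The graph was traced with window_size=None, no attn_mask, and p_dropout=0.0, so only that configuration is covered; also note MultiHeadAttentionNew.forward never populates self.attn, unlike the eager forward.

import torch

torch.manual_seed(0)
eager = MultiHeadAttention(channels=4, out_channels=4, n_heads=4).cuda()
fused = MultiHeadAttentionNew(channels=4, out_channels=4, n_heads=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical conv_q/k/v/o parameters
x = torch.rand(4, 4, 4, device='cuda')     # (batch, channels, time) from get_inputs()
c = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x, c), fused(x, c), atol=1e-5))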
DenseSAGEConv
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class DenseSAGEConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super(DenseSAGEConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_rel = Linear(in_channels, out_channels, bias=False) self.lin_root = Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): self.lin_rel.reset_parameters() self.lin_root.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) add_loop (bool, optional): If set to :obj:`False`, the layer will not automatically add self-loops to the adjacency matrices. (default: :obj:`True`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) out = self.lin_rel(out) + self.lin_root(x) if self.normalize: out = F.normalize(out, p=2, dim=-1) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clamp_div_sum_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(in_out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clamp_div_sum_1[grid(256)](buf2, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (4, 1), 0), out=buf3) del primals_3 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf4) del primals_4 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_add_2[grid(256)](buf5, buf4, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_5 return buf5, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0 ), reinterpret_tensor(buf2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class DenseSAGEConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super(DenseSAGEConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_rel = Linear(in_channels, out_channels, bias=False) self.lin_root = Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): self.lin_rel.reset_parameters() self.lin_root.reset_parameters() def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.lin_rel.weight primals_4 = self.lin_root.weight primals_5 = self.lin_root.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0] class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None)
CFF-Dream/pytorch_geometric
DenseSAGEConv
false
2033
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
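The same hedged equivalence pattern for DenseSAGEConv, assuming CUDA and that both classes (plus the custom grouped Linear) share one module; normalize=False and mask=None match the traced path, and the tolerance is illustrative.

import torch

torch.manual_seed(0)
eager = DenseSAGEConv(in_channels=4, out_channels=4).cuda()
fused = DenseSAGEConvNew(in_channels=4, out_channels=4).cuda()
fused.load_state_dict(eager.state_dict())  # sync lin_rel and lin_root parameters
x = torch.rand(4, 4, 4, 4, device='cuda')  # shapes from get_inputs()
adj = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x, adj), fused(x, adj), atol=1e-5))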
ShiftedSoftplus
import torch import torch.nn.functional as F import torch.utils.data class ShiftedSoftplus(torch.nn.Module): def __init__(self): super(ShiftedSoftplus, self).__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, x): return F.softplus(x) - self.shift def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.6931471824645996 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ShiftedSoftplusNew(torch.nn.Module): def __init__(self): super(ShiftedSoftplusNew, self).__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CFF-Dream/pytorch_geometric
ShiftedSoftplus
false
2034
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
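A quick numeric sketch, assuming CUDA; the fused kernel hard-codes F.softplus's default threshold of 20 and the float32 constant log(2) = 0.6931471824645996, and call() asserts the traced (4, 4, 4, 4) shape.

import torch
import torch.nn.functional as F

m = ShiftedSoftplusNew()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = m(x)
ref = F.softplus(x) - torch.log(torch.tensor(2.0, device='cuda'))
print(torch.allclose(out, ref, atol=1e-6))
print(m(torch.zeros(4, 4, 4, 4, device='cuda'))[0, 0, 0, 0])  # ~0: the shift zeroes the activation at the origin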
ResidualLayer
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayer(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayer, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, x): return x + self.act(self.lin2(self.act(self.lin1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_2, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayerNew(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayerNew, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, input_0): primals_1 = self.lin1.weight primals_3 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
CFF-Dream/pytorch_geometric
ResidualLayer
false
2035
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
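A minimal sanity check for the record above — a sketch only, assuming ResidualLayerNew and swish from the generated module are in scope and a CUDA device is available; the helper name and tolerance are illustrative choices, not part of the source.

import torch

def check_residual_layer():
    # The fused graph computes x + swish(lin2(swish(lin1(x)))), so we
    # restate that in eager mode using the module's own sublayers.
    opt = ResidualLayerNew(hidden_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        expected = x + swish(opt.lin2(swish(opt.lin1(x))))
        actual = opt(x)
    assert torch.allclose(actual, expected, atol=1e-5)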
Envelope
import torch import torch.utils.data class Envelope(torch.nn.Module): def __init__(self, exponent): super(Envelope, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, x): p, a, b, c = self.p, self.a, self.b, self.c x_pow_p0 = x.pow(p) x_pow_p1 = x_pow_p0 * x return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'exponent': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp0 tmp6 = tmp5 * tmp5 tmp7 = -15.0 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tmp10 = tmp6 * tmp0 tmp11 = 24.0 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = -10.0 tmp15 = tmp10 * tmp14 tmp16 = tmp15 * tmp0 tmp17 = tmp13 + tmp16 tl.store(out_ptr0 + x0, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class EnvelopeNew(torch.nn.Module): def __init__(self, exponent): super(EnvelopeNew, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CFF-Dream/pytorch_geometric
Envelope
false
2036
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
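A quick numerical check of the fused polynomial above — a sketch assuming EnvelopeNew is in scope and CUDA is available. For exponent 4 the coefficients work out to a = -15, b = 24, c = -10, which is exactly what the kernel hard-codes.

import torch

def check_envelope():
    # Offset the input away from 0 so the 1/x term stays well-conditioned.
    x = torch.rand(4, 4, 4, 4, device='cuda') + 0.1
    env = EnvelopeNew(exponent=4)
    expected = 1.0 / x - 15.0 * x ** 4 + 24.0 * x ** 5 - 10.0 * x ** 6
    assert torch.allclose(env(x), expected, atol=1e-4)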
CuboidPoseHead
import torch import torch.nn as nn import torch.nn.functional as F class CuboidPoseHead(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to `paper. <https://arxiv.org/abs/2004.06239>` for more details. Args: beta: Constant to adjust the magnification of soft-maxed heatmap. """ super(CuboidPoseHead, self).__init__() self.beta = beta self.loss = nn.L1Loss() def forward(self, heatmap_volumes, grid_coordinates): """ Args: heatmap_volumes (torch.Tensor(NxKxLxWxH)): 3D human pose heatmaps predicted by the network. grid_coordinates (torch.Tensor(Nx(LxWxH)x3)): Coordinates of the grids in the heatmap volumes. Returns: human_poses (torch.Tensor(NxKx3)): Coordinates of human poses. """ batch_size = heatmap_volumes.size(0) channel = heatmap_volumes.size(1) x = heatmap_volumes.reshape(batch_size, channel, -1, 1) x = F.softmax(self.beta * x, dim=2) grid_coordinates = grid_coordinates.unsqueeze(1) x = torch.mul(x, grid_coordinates) human_poses = torch.sum(x, dim=2) return human_poses def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'beta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class CuboidPoseHeadNew(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to `paper. <https://arxiv.org/abs/2004.06239>` for more details. Args: beta: Constant to adjust the magnification of soft-maxed heatmap. """ super(CuboidPoseHeadNew, self).__init__() self.beta = beta self.loss = nn.L1Loss() def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
CuboidPoseHead
false
2037
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
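A usage sketch for the fused head, assuming CuboidPoseHeadNew above is defined and CUDA is available. With the toy (4, 4) shapes from get_inputs() the softmax runs over a single element, so the kernel degenerates to broadcasting the grid; real inputs would be NxKxLxWxH heatmap volumes as the docstring describes.

import torch

def demo_cuboid_pose_head():
    head = CuboidPoseHeadNew(beta=4)
    heatmaps = torch.rand(4, 4, device='cuda')  # toy shapes from get_inputs()
    grid = torch.rand(4, 4, device='cuda')
    poses = head(heatmaps, grid)
    print(poses.shape)  # torch.Size([4, 4, 4])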
Attention
import math import torch import torch.nn.functional as F import torch.utils.data def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1, buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = buf0 del buf0 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class AttentionNew(torch.nn.Module): def __init__(self, dropout=0): super(AttentionNew, self).__init__() self.dropout = dropout def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
CFF-Dream/pytorch_geometric
Attention
false
2038
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
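A small eager-mode demo of the restricted_softmax defined above (no GPU needed); it shows the property the fused kernels preserve: the extra exp(margin - max) term in the denominator makes each row sum to strictly less than 1.

import torch

def demo_restricted_softmax():
    scores = torch.randn(2, 5)
    probs = restricted_softmax(scores, dim=-1)
    # Each row sums to sum(exp) / (sum(exp) + exp(-clamped_max)) < 1,
    # so some probability mass is implicitly assigned to a "null" slot.
    assert (probs.sum(dim=-1) < 1).all()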
GlobalAttentionGeneral
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneral(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneral, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input, context): """ input: batch x idf x ih x iw (queryL=ihxiw) context: batch x cdf x sourceL """ ih, iw = input.size(2), input.size(3) queryL = ih * iw batch_size, sourceL = context.size(0), context.size(2) target = input.view(batch_size, -1, queryL) targetT = torch.transpose(target, 1, 2).contiguous() sourceT = context.unsqueeze(3) sourceT = self.conv_context(sourceT).squeeze(3) attn = torch.bmm(targetT, sourceT) attn = attn.view(batch_size * queryL, sourceL) if self.mask is not None: mask = self.mask.repeat(queryL, 1) attn.data.masked_fill_(mask.data, -float('inf')) attn = self.sm(attn) attn = attn.view(batch_size, queryL, sourceL) attn = torch.transpose(attn, 1, 2).contiguous() weightedContext = torch.bmm(sourceT, attn) weightedContext = weightedContext.view(batch_size, -1, ih, iw) attn = attn.view(batch_size, -1, ih, iw) return weightedContext, attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'idf': 4, 'cdf': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask) tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6 def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneralNew(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneralNew, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input_0, input_1): primals_3 = self.conv_context.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
BedirYilmaz/cycle-image-gan
GlobalAttentionGeneral
false
2039
[ "MIT" ]
0
a64da5774ec522c0322e9c21437dc9c066a50a89
https://github.com/BedirYilmaz/cycle-image-gan/tree/a64da5774ec522c0322e9c21437dc9c066a50a89
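A parity sketch between the eager and fused attention modules above, assuming both classes are defined in one module and CUDA is available; note the traced graph assumes self.mask is None, so applyMask() has no effect on the Triton path.

import torch

def check_global_attention():
    torch.manual_seed(0)
    ref = GlobalAttentionGeneral(idf=4, cdf=4).cuda()
    opt = GlobalAttentionGeneralNew(idf=4, cdf=4).cuda()
    opt.load_state_dict(ref.state_dict())  # share the conv_context weight
    feat = torch.rand(4, 4, 4, 4, device='cuda')  # batch x idf x ih x iw
    ctx = torch.rand(4, 4, 4, device='cuda')      # batch x cdf x sourceL
    with torch.no_grad():
        out_ref, attn_ref = ref(feat, ctx)
        out_opt, attn_opt = opt(feat, ctx)
    assert torch.allclose(out_ref, out_opt, atol=1e-5)
    assert torch.allclose(attn_ref, attn_opt, atol=1e-5)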
FocalLoss
import torch from torch import Tensor import torch.nn as nn from typing import Optional from typing import Union import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def sigmoid_focal_loss(pred: 'Tensor', target: 'Tensor', weight: 'Optional[Tensor]'=None, gamma: 'float'=2.0, alpha: 'Union[float, Tensor]'=0.25, reduction: 'str'='mean', avg_factor: 'Optional[float]'=None) ->Tensor: """Sigmoid focal loss. Args: pred: The prediction with shape (N, \\*). target: The ground truth label of the prediction with shape (N, \\*). weight: Sample-wise loss weight with shape (N, ). Defaults to None. gamma: The gamma for calculating the modulating factor. Defaults to 2.0. alpha: A balanced form for Focal Loss. If it is a float, then a global balanced form is applied. If it is Tensor with shape (N, \\*) or any shape that are broadcast-compatible with `pred`. reduction: The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor: Average factor that is used to average the loss. Defaults to None. Returns: Loss. """ assert pred.shape == target.shape, 'pred and target should be in the same shape.' pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma ) loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class FocalLoss(nn.Module): """Focal loss. Args: gamma (float): Focusing parameter in focal loss. Defaults to 2.0. alpha (float): The parameter in balanced form of focal loss. Defaults to 0.25. reduction (str): The method used to reduce the loss into a scalar. Options are "none" and "mean". Defaults to 'mean'. loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0 ): super(FocalLoss, self).__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Sigmoid focal loss. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*), N or (N,1). 
Note that the target must be one-hot encoded weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, \\*). Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The method used to reduce the loss into a scalar. Options are "none", "mean" and "sum". Defaults to None. Returns: torch.Tensor: Loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_cls = self.loss_weight * sigmoid_focal_loss(pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import Tensor import torch.nn as nn from typing import Optional from typing import Union import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 0.25 tmp14 = tmp0 * tmp13 tmp15 = 0.75 tmp16 = tmp2 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tl.sigmoid(tmp3) tmp19 = tmp1 - tmp18 tmp20 = tmp19 * tmp0 tmp21 = tmp18 * tmp2 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp17 * tmp23 tmp25 = tmp12 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tmp31 = tmp30 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def sigmoid_focal_loss(pred: 'Tensor', target: 'Tensor', weight: 'Optional[Tensor]'=None, gamma: 'float'=2.0, alpha: 'Union[float, Tensor]'=0.25, reduction: 'str'='mean', avg_factor: 'Optional[float]'=None) ->Tensor: """Sigmoid focal loss. 
Args: pred: The prediction with shape (N, \\*). target: The ground truth label of the prediction with shape (N, \\*). weight: Sample-wise loss weight with shape (N, ). Defaults to None. gamma: The gamma for calculating the modulating factor. Defaults to 2.0. alpha: A balanced form for Focal Loss. If it is a float, then a global balanced form is applied. If it is Tensor with shape (N, \\*) or any shape that are broadcast-compatible with `pred`. reduction: The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor: Average factor that is used to average the loss. Defaults to None. Returns: Loss. """ assert pred.shape == target.shape, 'pred and target should be in the same shape.' pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma ) loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class FocalLossNew(nn.Module): """Focal loss. Args: gamma (float): Focusing parameter in focal loss. Defaults to 2.0. alpha (float): The parameter in balanced form of focal loss. Defaults to 0.25. reduction (str): The method used to reduce the loss into a scalar. Options are "none" and "mean". Defaults to 'mean'. loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0 ): super(FocalLossNew, self).__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CAMP-eXplain-AI/imba-explain
FocalLoss
false
2040
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
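A parity sketch, assuming both loss classes above are in scope and CUDA is available. Worth noting: the single fused kernel hard-codes the defaults (gamma=2.0, alpha=0.25, weight=None, reduction='mean', loss_weight=1.0), so only that configuration is exercised here.

import torch

def check_focal_loss_defaults():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    ref = FocalLoss()(pred, target)     # eager reference path
    opt = FocalLossNew()(pred, target)  # fused Triton reduction
    assert torch.allclose(ref, opt, atol=1e-6)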
Discriminator
import torch import torch.nn.functional as F import torch.utils.data class Discriminator(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super(Discriminator, self).__init__() self.lin1 = torch.nn.Linear(in_channels, hidden_channels) self.lin2 = torch.nn.Linear(hidden_channels, hidden_channels) self.lin3 = torch.nn.Linear(hidden_channels, out_channels) def forward(self, x): x = F.relu(self.lin1(x)) x = F.relu(self.lin2(x)) x = self.lin3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'hidden_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6 class DiscriminatorNew(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super(DiscriminatorNew, self).__init__() self.lin1 = torch.nn.Linear(in_channels, hidden_channels) self.lin2 = torch.nn.Linear(hidden_channels, hidden_channels) self.lin3 = torch.nn.Linear(hidden_channels, out_channels) def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_6 = self.lin3.weight primals_7 = self.lin3.bias primals_3 = input_0 output 
= call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
CFF-Dream/pytorch_geometric
Discriminator
false
2041
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
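An eager restatement of what the generated graph computes for the MLP above — a sketch assuming DiscriminatorNew is defined and CUDA is available; the Triton path folds each bias add and ReLU into one kernel over the mm output.

import torch
import torch.nn.functional as F

def check_discriminator():
    opt = DiscriminatorNew(4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        h = F.relu(opt.lin1(x))  # mm + bias + ReLU, fused on the Triton path
        h = F.relu(opt.lin2(h))
        expected = opt.lin3(h)
        assert torch.allclose(opt(x), expected, atol=1e-5)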
MNIST_CNN
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class SqueezeLastTwo(nn.Module): """A module which squeezes the last two dimensions, ordinary squeeze can be a problem for batch size 1""" def __init__(self): super(SqueezeLastTwo, self).__init__() def forward(self, x): return x.view(x.shape[0], x.shape[1]) class MNIST_CNN(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 generalization severely. """ n_outputs = 128 def __init__(self, input_shape): super(MNIST_CNN, self).__init__() self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.squeezeLastTwo = SqueezeLastTwo() def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.bn0(x) x = self.conv2(x) x = F.relu(x) x = self.bn1(x) x = self.conv3(x) x = F.relu(x) x = self.bn2(x) x = self.conv4(x) x = F.relu(x) x = self.bn3(x) x = self.avgpool(x) x = self.squeezeLastTwo(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex % 8 r3 = rindex // 8 x0 = xindex % 8 x1 = xindex // 8 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 64 * r3 + 1024 * x1), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 128, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 128.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr2 + x4, tmp23, xmask) tl.store(out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr1 + x4, tmp18, xmask) @triton.jit def triton_poi_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = xindex // 1024 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 128.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, 
xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 8 x1 = xindex // 8 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 128 * r3 + 512 * x1), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr2 + x4, tmp23, xmask) tl.store(out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr1 + x4, tmp18, xmask) @triton.jit def triton_poi_fused_native_group_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = xindex // 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 64.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_mean_native_group_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1), xmask) tmp3 = tl.load(in_ptr1 + x2 // 16, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2 // 16, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (128 + x0 + 512 * x1), xmask) tmp23 = tl.load(in_ptr0 + (256 + x0 + 512 * x1), xmask) tmp30 = tl.load(in_ptr0 + (384 + x0 + 512 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 64.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = tmp17 - tmp3 tmp19 = tmp18 * tmp10 tmp20 = tmp19 * tmp12 tmp21 = tmp20 + tmp14 tmp22 = tmp15 + tmp21 tmp24 = triton_helpers.maximum(tmp1, tmp23) tmp25 = tmp24 - tmp3 tmp26 = tmp25 * tmp10 tmp27 = tmp26 * tmp12 tmp28 = tmp27 + tmp14 tmp29 = tmp22 + tmp28 tmp31 = triton_helpers.maximum(tmp1, tmp30) tmp32 = tmp31 - tmp3 tmp33 = tmp32 * tmp10 tmp34 = tmp33 * tmp12 
tmp35 = tmp34 + tmp14 tmp36 = tmp29 + tmp35 tmp37 = 4.0 tmp38 = tmp36 / tmp37 tl.store(out_ptr0 + x2, tmp38, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64,), (1,)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4, 3, 3), (36, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(256, 9)](primals_1, buf0, 256, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_6, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_10, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_14, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 4, 4), (1024, 1, 256, 64)) buf6 = buf5 del buf5 triton_poi_fused_convolution_4[grid(4096)](buf6, primals_2, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf7 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf8 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf11 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_5[grid(32)](buf6, buf7, buf8, buf11, 32, 128, XBLOCK=8, num_warps=8, num_stages=1) buf10 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_native_group_norm_6[grid(4096)](buf6, buf7, buf8, primals_4, primals_5, buf10, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf12 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 2, 2), (512, 1, 256, 128)) buf13 = buf12 del buf12 triton_poi_fused_convolution_7[grid(2048)](buf13, primals_7, 2048, XBLOCK=256, num_warps=4, num_stages=1) del 
primals_7 buf14 = buf8 del buf8 buf15 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf18 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_8[grid(32)](buf13, buf14, buf15, buf18, 32, 64, XBLOCK=32, num_warps=8, num_stages=1) buf17 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_native_group_norm_9[grid(2048)](buf13, buf14, buf15, primals_8, primals_9, buf17, 2048, XBLOCK=256, num_warps =4, num_stages=1) del primals_9 buf19 = extern_kernels.convolution(buf17, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 2, 2), (512, 1, 256, 128)) buf20 = buf19 del buf19 triton_poi_fused_convolution_7[grid(2048)](buf20, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf21 = buf15 del buf15 buf22 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_8[grid(32)](buf20, buf21, buf22, buf25, 32, 64, XBLOCK=32, num_warps=8, num_stages=1) buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_native_group_norm_9[grid(2048)](buf20, buf21, buf22, primals_12, primals_13, buf24, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf26 = extern_kernels.convolution(buf24, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 2, 2), (512, 1, 256, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_7[grid(2048)](buf27, primals_15, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf28 = buf22 del buf22 buf29 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf31 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_8[grid(32)](buf27, buf28, buf29, buf31, 32, 64, XBLOCK=32, num_warps=8, num_stages=1) buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch. float32) triton_poi_fused_mean_native_group_norm_10[grid(512)](buf27, buf28, buf29, primals_16, primals_17, buf32, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf29 del primals_17 return (reinterpret_tensor(buf32, (4, 128), (128, 1), 0), buf0, buf1, primals_4, buf2, primals_8, buf3, primals_12, buf4, primals_16, buf6, buf10, reinterpret_tensor(buf7, (4, 8), (8, 1), 0), reinterpret_tensor(buf11, (4, 8), (8, 1), 0), buf13, buf17, reinterpret_tensor(buf14, (4, 8), (8, 1), 0), reinterpret_tensor( buf18, (4, 8), (8, 1), 0), buf20, buf24, reinterpret_tensor(buf21, (4, 8), (8, 1), 0), reinterpret_tensor(buf25, (4, 8), (8, 1), 0), buf27, reinterpret_tensor(buf28, (4, 8), (8, 1), 0), reinterpret_tensor(buf31, (4, 8), (8, 1), 0)) class SqueezeLastTwo(nn.Module): """A module which squeezes the last two dimensions, ordinary squeeze can be a problem for batch size 1""" def __init__(self): super(SqueezeLastTwo, self).__init__() def forward(self, x): return x.view(x.shape[0], x.shape[1]) class MNIST_CNNNew(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 generalization severely. 
""" n_outputs = 128 def __init__(self, input_shape): super(MNIST_CNNNew, self).__init__() self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.squeezeLastTwo = SqueezeLastTwo() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_10 = self.conv3.weight primals_8 = self.conv3.bias primals_14 = self.conv4.weight primals_9 = self.conv4.bias primals_4 = self.bn0.weight primals_5 = self.bn0.bias primals_11 = self.bn1.weight primals_12 = self.bn1.bias primals_13 = self.bn2.weight primals_15 = self.bn2.bias primals_16 = self.bn3.weight primals_17 = self.bn3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
AllenPu/DomainBed
MNIST_CNN
false
2042
[ "MIT" ]
0
77519d71471e67f0356134abe0bf01a6dd2fdcfa
https://github.com/AllenPu/DomainBed/tree/77519d71471e67f0356134abe0bf01a6dd2fdcfa
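A parity sketch between MNIST_CNN and MNIST_CNNNew above, assuming both are in scope and a CUDA device is present; load_state_dict makes the randomly initialized convolutions and GroupNorm affines identical, so the comparison exercises the full primals mapping in forward.

import torch

def check_mnist_cnn():
    torch.manual_seed(0)
    ref = MNIST_CNN([4, 4]).cuda()
    opt = MNIST_CNNNew([4, 4]).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        out_ref, out_opt = ref(x), opt(x)
    assert out_ref.shape == out_opt.shape == (4, 128)  # (N, n_outputs)
    assert torch.allclose(out_ref, out_opt, atol=1e-4)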
SelfAttention
import torch
import torch.nn as nn


class SelfAttention(nn.Module):

    def __init__(self, embed_dims, heads):
        super(SelfAttention, self).__init__()
        self.heads = heads
        self.embed_dims = embed_dims
        self.depth = embed_dims // heads
        self.query = nn.Linear(self.depth, self.depth)
        self.key = nn.Linear(self.depth, self.depth)
        self.value = nn.Linear(self.depth, self.depth)
        self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims)

    def forward(self, query, key, value, mask, isDecoder=False):
        batch, q_len, k_len, v_len = query.shape[0], query.shape[1], key.shape[
            1], value.shape[1]
        query = query.reshape(batch, q_len, self.heads, self.depth)
        key = key.reshape(batch, k_len, self.heads, self.depth)
        value = value.reshape(batch, v_len, self.heads, self.depth)
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)
        energy = torch.einsum('bqhd, bkhd -> bhqk', [query, key])
        if mask is not None:
            energy = energy.masked_fill(mask == 0, float('-1e20'))
        # Note: `self.depth ** 1 / 2` parses as `(depth ** 1) / 2`, i.e.
        # depth / 2 rather than sqrt(depth); the fused kernel below bakes in
        # exactly this scaling (a factor of 2.0 for depth == 1).
        energy = torch.softmax(energy / (self.depth ** 1 / 2), dim=-1)
        out = torch.einsum('bhqv, bvhd -> bqhd', [energy, value])
        out = out.reshape(batch, q_len, self.heads * self.depth)
        query = query.reshape(batch, q_len, self.heads * self.depth)
        out = torch.cat([query, out], dim=-1)
        out = self.fc_out(out)
        return out, energy


def get_inputs():
    return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand(
        [4, 4, 4, 1]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'embed_dims': 4, 'heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr2 + (4 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp15 = tl.load(in_ptr2 + (8 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr2 + (12 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp10 = tmp1 * tmp9 tmp11 = tl.where(tmp8, tmp4, tmp10) tmp12 = tmp11 * tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tl.where(tmp14, tmp4, tmp16) tmp18 = tmp17 * tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tl.where(tmp20, tmp4, tmp22) tmp24 = tmp23 * tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = 2.0 tmp28 = tmp26 * tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = tmp12 - tmp25 tmp31 = tmp30 * tmp27 tmp32 = tl_math.exp(tmp31) tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp25 tmp35 = tmp34 * tmp27 tmp36 = tl_math.exp(tmp35) tmp37 = tmp33 + tmp36 tmp38 = tmp24 - tmp25 tmp39 = tmp38 * tmp27 tmp40 = tl_math.exp(tmp39) tmp41 = tmp37 + tmp40 tl.store(out_ptr0 + (x2 + 4 * y3), tmp25, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp41, xmask & ymask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x0 = xindex % 4 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + 
x4, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp9 = tmp7 - tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK]) tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = xindex // 8 x1 = xindex // 8 % 4 x2 = xindex // 32 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 4 * (-4 + x0) + 16 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 1), (1, 1)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_11, (4, 8), (8, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), primals_4, alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), primals_6, alpha=1, beta=1, out=buf3) del primals_6 del primals_7 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), primals_8, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_10, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_10 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_1[grid(16, 4)](buf5, buf1, buf3, buf6, buf7, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(256)](buf5, buf1, buf3, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf7 triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_9, buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_4[grid(128)](buf1, buf10, buf11, 128, XBLOCK= 128, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_12, reinterpret_tensor(buf11, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf12) del primals_12 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), buf8, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0 ), buf1, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0 ), buf3, reinterpret_tensor(primals_3, (64, 1), (1, 1), 0 ), buf5, buf8, reinterpret_tensor(buf11, (16, 8), (8, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, embed_dims, heads): super(SelfAttentionNew, self).__init__() self.heads = heads self.embed_dims = embed_dims self.depth = embed_dims // heads self.query = nn.Linear(self.depth, self.depth) self.key = nn.Linear(self.depth, self.depth) self.value = nn.Linear(self.depth, self.depth) self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims) def forward(self, input_0, input_1, input_2, input_3): primals_4 = self.query.weight primals_5 = self.query.bias primals_6 = self.key.weight primals_7 = self.key.bias primals_8 = self.value.weight primals_9 = self.value.bias primals_11 = self.fc_out.weight primals_12 = self.fc_out.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
Brandon-mg/LipReader-Transformer
SelfAttention
false
2043
[ "MIT" ]
0
0fe52957943368d7c5b8d1b0df39e3fb14f7c035
https://github.com/Brandon-mg/LipReader-Transformer/tree/0fe52957943368d7c5b8d1b0df39e3fb14f7c035
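Two notes on this pair. First, the eager code scales scores with energy / (self.depth ** 1 / 2), which Python parses as (depth ** 1) / 2 = depth / 2 rather than sqrt(depth); with depth = embed_dims // heads = 1 here, that division by 0.5 is the 2.0 multiplier visible in the fused softmax kernels. Second, a minimal parity sketch (assumptions: a CUDA device is available, since call() allocates GPU buffers, and get_inputs is the helper from this record):

import torch

# Sketch: tie the weights of the eager and compiled modules, then compare.
torch.manual_seed(0)
eager = SelfAttention(embed_dims=4, heads=4).cuda()
fast = SelfAttentionNew(embed_dims=4, heads=4).cuda()
fast.load_state_dict(eager.state_dict())
q, k, v, mask = [t.cuda() for t in get_inputs()]
out_ref, energy_ref = eager(q, k, v, mask)
out_new, energy_new = fast(q, k, v, mask)
print((out_ref - out_new).abs().max())  # expect ~1e-6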
SmoothL1Loss
import torch import torch.nn as nn import torch.nn.functional as F class SmoothL1Loss(nn.Module): """SmoothL1Loss loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.smooth_l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mul_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SmoothL1LossNew(nn.Module): """SmoothL1Loss loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.smooth_l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
SmoothL1Loss
false
2044
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
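The fused reduction above is F.smooth_l1_loss with its default beta = 1.0, then a multiply by loss_weight (1.0): elementwise 0.5*d**2 below the kink at |d| = 1 and |d| - 0.5 above it, followed by a mean. (The "weighted MSE loss" wording in the docstring is inherited from the MSELoss variant; here the optional target_weight multiplies the inputs to the smooth L1 criterion.) A small CPU sketch of that rule:

import torch
import torch.nn.functional as F

# Sketch: the elementwise rule the fused kernel implements, beta = 1.0.
d = torch.linspace(-2.0, 2.0, 9)
manual = torch.where(d.abs() < 1.0, 0.5 * d ** 2, d.abs() - 0.5).mean()
print(torch.allclose(manual, F.smooth_l1_loss(d, torch.zeros_like(d))))  # True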
MultiHead
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class MultiHead(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super(MultiHead, self).__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) == value.size(-1) assert key.size(-2) == value.size(-2) query = self.lin_q(query) key = self.lin_k(key) value = self.lin_v(value) size = list(query.size())[:-2] 
out_channels_per_head = self.out_channels // self.heads query_size = size + [query.size(-2), self.heads, out_channels_per_head] query = query.view(*query_size).transpose(-2, -3) key_size = size + [key.size(-2), self.heads, out_channels_per_head] key = key.view(*key_size).transpose(-2, -3) value_size = size + [value.size(-2), self.heads, out_channels_per_head] value = value.view(*value_size).transpose(-2, -3) out = super(MultiHead, self).forward(query, key, value) out = out.transpose(-3, -2).contiguous() out = out.view(*(size + [query.size(-2), self.out_channels])) return out def __repr__(self): return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads, self.groups, self.dropout, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2 + 64 * ((x1 + 4 * (x2 % 4)) // 16)), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) tl.store(out_ptr1 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * 
tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2) del primals_8 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_0[grid(256)](buf4, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf3, buf5, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0) del buf3 buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf4, buf6, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(buf5, buf6, out=buf7) buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0 ) del buf6 triton_poi_fused_clamp_div_exp_max_sub_2[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3[grid(64)](buf8, buf7, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0 ) del buf8 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4[grid(256)](buf10, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = 
reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_0[grid(256)](buf11, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf12 = buf5 del buf5 buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf11, buf12, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0) del buf11 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13) del buf12 return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. 
__name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class MultiHeadNew(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super(MultiHeadNew, self).__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def __repr__(self): return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads, self.groups, self.dropout, self.bias) def forward(self, input_0, input_1, input_2): primals_4 = self.lin_q.weight primals_5 = self.lin_q.bias primals_6 = self.lin_k.weight primals_7 = self.lin_k.bias primals_8 = self.lin_v.weight primals_9 = self.lin_v.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
CFF-Dream/pytorch_geometric
MultiHead
false
2045
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
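The piece of this record worth unpacking is restricted_softmax: with margin = 0 it is an ordinary softmax over the scores augmented with one extra implicit logit fixed at 0 (which is then discarded), and that is what the clamp/exp/rsub/sum kernel chain above fuses. A small CPU sketch, assuming the restricted_softmax defined in this record:

import torch

# Sketch: restricted_softmax(src) == softmax([src, 0])[..., :-1] for margin=0.
src = torch.randn(2, 5)
aug = torch.cat([src, torch.zeros(2, 1)], dim=-1)
print(torch.allclose(restricted_softmax(src),
                     torch.softmax(aug, dim=-1)[:, :-1]))  # True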
RSoftmax
import torch import torch.nn as nn import torch.nn.functional as F class RSoftmax(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'radix': 4, 'groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 256, 1), torch. float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32 ) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 64), (64, 1), 0), class RSoftmaxNew(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ALISCIFP/mmpose
RSoftmax
false
2046
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
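A CPU reference for what the two kernels compute in this record's radix=4, groups=1 configuration: a max-subtracted exp followed by a sum-normalization along the radix axis, i.e. softmax over dim=1 of the transposed view, matching the (4, 64) tensor call() returns. A sketch:

import torch
import torch.nn.functional as F

# Sketch: eager-mode reference for the two-kernel radix softmax.
x = torch.rand(4, 4, 4, 4)
batch, radix, groups = x.size(0), 4, 1
ref = F.softmax(x.view(batch, groups, radix, -1).transpose(1, 2), dim=1)
print(ref.reshape(batch, -1).shape)  # torch.Size([4, 64])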
Linear
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class LinearNew(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(LinearNew, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CFF-Dream/pytorch_geometric
Linear
false
2047
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
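With groups=1 (this record's configuration) the grouped Linear collapses to a single matmul plus bias, which is what call() performs via extern_kernels.mm on the flattened (64, 4) view followed by the fused bias-add kernel. A CPU sketch of the equivalence, using the custom Linear class defined in this record (it shadows torch.nn.Linear):

import torch

# Sketch: for groups=1 the module computes x @ W.squeeze(0) + b.
lin = Linear(4, 4)                     # custom class from this record
x = torch.rand(4, 4, 4, 4)
manual = x @ lin.weight.squeeze(0) + lin.bias
print(torch.allclose(lin(x), manual))  # True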
ConvBlockINE
import torch from torch import nn from torch.nn import init as init class ConvBlockINE(nn.Module): def __init__(self, in_ch, out_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.InstanceNorm2d(out_ch, affine=True) self.norm2 = nn.InstanceNorm2d(out_ch, affine=True) def forward(self, x, g=None, b=None): x1 = self.conv1(x) x1 = self.act(x1) x1 = self.norm1(x1) x1 = self.conv2(x1) x1 = self.act(x1) out = self.norm2(x1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = 
tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 16, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = 16.0 tmp22 = tmp20 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr2 + x3, tmp25, xmask) tl.store(out_ptr0 + x3, tmp14, xmask) tl.store(out_ptr1 + x3, tmp20, xmask) @triton.jit def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 4, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_copy_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x6 = xindex tmp0 = x0 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp5 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + x4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 - tmp14 tmp16 = tl.load(in_ptr2 + x4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp15 * tmp21 tmp23 = tl.load(in_ptr3 + x4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tl.load(in_ptr4 + x2, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp10, tmp26, tmp27) tmp29 = 
tl.load(in_ptr5 + x6, tmp5 & xmask, other=0.0) tmp30 = tl.where(tmp9, tmp28, tmp29) tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype) tmp32 = tl.where(tmp5, tmp30, tmp31) tmp33 = float('nan') tmp34 = tl.where(tmp5, tmp32, tmp33) tl.store(out_ptr0 + x6, tmp34, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x3 = xindex // 6 x4 = xindex tmp30 = tl.load(in_ptr0 + x4, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tl.full([1], 5, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp5 & tmp2 tmp7 = -4 + x0 tmp8 = tmp7 < tmp1 tmp9 = tmp8 & tmp6 tmp10 = tl.load(in_ptr0 + (28 + 6 * x3), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (20 + x4), tmp6 & xmask, other=0.0) tmp12 = tl.where(tmp8, tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp6, tmp12, tmp13) tmp15 = tmp3 < tmp1 tmp16 = tmp15 & tmp2 tmp17 = tl.load(in_ptr0 + (28 + 6 * x3), tmp16 & xmask, eviction_policy ='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (24 + x4), tmp2 & xmask, other=0.0) tmp19 = tl.where(tmp15, tmp17, tmp18) tmp20 = tl.where(tmp5, tmp14, tmp19) tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp2, tmp20, tmp21) tmp23 = tmp8 & tmp5 tmp24 = tl.load(in_ptr0 + (4 + 6 * x3), tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tl.load(in_ptr0 + (-4 + x4), tmp5 & xmask, other=0.0) tmp26 = tl.where(tmp8, tmp24, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp5, tmp26, tmp27) tmp29 = tl.load(in_ptr0 + (4 + 6 * x3), tmp15 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tl.where(tmp15, tmp29, tmp30) tmp32 = tl.where(tmp5, tmp28, tmp31) tmp33 = tl.where(tmp2, tmp22, tmp32) tl.store(out_ptr0 + x4, tmp33, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp4 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (6 + x0 + 36 * x2), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_repeat_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0 % 4, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) 
tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 16.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(576)](primals_3, buf0, buf1, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = buf0 del buf0 triton_poi_fused_1[grid(576)](buf1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_convolution_2[grid(16)](buf4, primals_2, buf6, buf7, buf9, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 buf5 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_3[grid(16)](primals_4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf10 = buf1 del buf1 buf11 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32 ) triton_poi_fused_copy_4[grid(576)](buf4, buf6, buf7, buf5, primals_5, buf10, buf11, 576, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_5 buf12 = buf10 del buf10 triton_poi_fused_5[grid(576)](buf11, buf12, 576, XBLOCK=256, num_warps=4, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused_6[grid(576)](buf12, buf13, 576, XBLOCK=256, num_warps=4, num_stages=1) del buf12 buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = reinterpret_tensor(buf7, (16,), (1,), 0) del buf7 buf15 = buf14 del buf14 buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf21 = empty_strided_cuda((1, 16, 4, 4), (256, 16, 4, 1), torch. float32) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) triton_per_fused__native_batch_norm_legit_convolution_repeat_7[grid(16) ](buf15, primals_8, primals_7, primals_9, buf16, buf17, buf21, buf20, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_7 del primals_8 del primals_9 return reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_6, buf2, buf4, buf5, reinterpret_tensor(buf9, (16,), (1,), 0), buf13, buf15, buf16, reinterpret_tensor(buf20, (16 ,), (1,), 0), reinterpret_tensor(buf17, (1, 16, 1, 1), (16, 1, 1, 1), 0 ), reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0) class ConvBlockINENew(nn.Module): def __init__(self, in_ch, out_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.InstanceNorm2d(out_ch, affine=True) self.norm2 = nn.InstanceNorm2d(out_ch, affine=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_4 = self.conv2.bias primals_5 = self.norm1.weight primals_7 = self.norm1.bias primals_8 = self.norm2.weight primals_9 = self.norm2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
BaekduChoi/Halftoning_v2
ConvBlockINE
false
2048
[ "BSD-3-Clause" ]
0
fdb7040e1a4044f23ef9c92757bbb90c23685afe
https://github.com/BaekduChoi/Halftoning_v2/tree/fdb7040e1a4044f23ef9c92757bbb90c23685afe
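A parity sketch for this pair; the copy/fused kernels 0, 1, 4, 5 and 6 materialize the circular padding explicitly before each valid convolution, so the outputs should agree with the eager module up to float32 noise. Assumes a CUDA device, since call() allocates GPU buffers:

import torch

# Sketch: tie weights, then compare eager vs. compiled on a random input.
torch.manual_seed(0)
eager = ConvBlockINE(4, 4).cuda()
fast = ConvBlockINENew(4, 4).cuda()
fast.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print((eager(x) - fast(x)).abs().max())  # expect ~1e-6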
MSELoss
import torch import torch.nn as nn import torch.nn.functional as F class MSELoss(nn.Module): """MSE loss for coordinate regression.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.mse_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K Args: output (torch.Tensor[N, K, 2]): Output regression. target (torch.Tensor[N, K, 2]): Target regression. target_weight (torch.Tensor[N, K, 2]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSELossNew(nn.Module): """MSE loss for coordinate regression.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.mse_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
MSELoss
false
2049
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
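A minimal equivalence sketch for the record above, assuming both the MSELoss and MSELossNew definitions from this record are in scope and a CUDA device is available; the Triton reduction is compiled for the fixed (4, 4, 4, 4) inputs that get_inputs() returns.

import torch

if torch.cuda.is_available():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    ref = MSELoss()(out, tgt)        # eager F.mse_loss with 'mean' reduction
    fused = MSELossNew()(out, tgt)   # single fused sum-and-scale kernel
    assert torch.allclose(ref, fused, atol=1e-5)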
GlobalAveragePooling
import torch import torch.nn as nn class GlobalAveragePooling(nn.Module): """Global Average Pooling neck. Note that we use `view` to remove extra channel after pooling. We do not use `squeeze` as it will also remove the batch dimension when the tensor has a batch dimension of size 1, which can lead to unexpected errors. """ def __init__(self): super().__init__() self.gap = nn.AdaptiveAvgPool2d((1, 1)) def init_weights(self): pass def forward(self, inputs): if isinstance(inputs, tuple): outs = tuple([self.gap(x) for x in inputs]) outs = tuple([out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) elif isinstance(inputs, list): outs = [self.gap(x) for x in inputs] outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)] elif isinstance(inputs, torch.Tensor): outs = self.gap(inputs) outs = outs.view(inputs.size(0), -1) else: raise TypeError('neck inputs should be tuple or torch.tensor') return outs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0), class GlobalAveragePoolingNew(nn.Module): """Global Average Pooling neck. Note that we use `view` to remove extra channel after pooling. We do not use `squeeze` as it will also remove the batch dimension when the tensor has a batch dimension of size 1, which can lead to unexpected errors. """ def __init__(self): super().__init__() self.gap = nn.AdaptiveAvgPool2d((1, 1)) def init_weights(self): pass def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ALISCIFP/mmpose
GlobalAveragePooling
false
2050
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
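A minimal equivalence sketch, assuming GlobalAveragePooling and GlobalAveragePoolingNew from this record are in scope and CUDA is available; the fused kernel divides by the hard-coded 16.0 spatial size, so it only serves the (4, 4, 4, 4) shape it was compiled for.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = GlobalAveragePooling()(x)       # AdaptiveAvgPool2d((1, 1)) + view
    fused = GlobalAveragePoolingNew()(x)  # one Triton mean over the 16 spatial cells
    assert ref.shape == fused.shape == (4, 4)
    assert torch.allclose(ref, fused, atol=1e-5)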
InvConvNear
import torch from torch import nn from torch.nn import functional as F import torch.utils.data class InvConvNear(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def forward(self, x, x_mask=None, reverse=False, **kwargs): b, c, t = x.size() assert c % self.n_split == 0 if x_mask is None: x_mask = 1 x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t else: x_len = torch.sum(x_mask, [1, 2]) x = x.view(b, 2, c // self.n_split, self.n_split // 2, t) x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t) if reverse: if hasattr(self, 'weight_inv'): weight = self.weight_inv else: weight = torch.inverse(self.weight.float()) logdet = None else: weight = self.weight if self.no_jacobian: logdet = 0 else: logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len weight = weight.view(self.n_split, self.n_split, 1, 1) z = F.conv2d(x, weight) z = z.view(b, 2, self.n_split // 2, c // self.n_split, t) z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask return z, logdet def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = -1.0 tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None) @triton.jit def triton_poi_fused_mul_scalar_tensor_where_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0).to(tl.int1) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = float('nan') tmp5 = tl.where(tmp1, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 4.0 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (1, 4)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._linalg_slogdet.default(primals_2) buf1 = buf0[0] buf2 = buf0[1] buf3 = buf0[2] buf4 = buf0[3] del buf0 buf5 = empty_strided_cuda((), (), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(1)](buf1, buf5, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf1 buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_scalar_tensor_where_1[grid(4)](buf5, buf2, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf2 buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_2[grid(4, 4)](primals_2, buf7, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf8 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 4, 1), 0), buf7, stride=(1, 1), padding=(0, 0 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 1, 4), (16, 4, 4, 1)) del buf7 buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0) del buf8 triton_poi_fused_mul_3[grid(64)](buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf9, buf6, reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 8, 1), 0), 
buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 1, 1), (1, 4, 4, 4), 0) class InvConvNearNew(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
AndreHe02/glow-tts
InvConvNear
false
2051
[ "MIT" ]
0
683f68f17790f2f46c23e9d3eadbcac352d82e2b
https://github.com/AndreHe02/glow-tts/tree/683f68f17790f2f46c23e9d3eadbcac352d82e2b
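A minimal sketch of the module's (z, logdet) interface, assuming the definitions from this record are in scope and CUDA is available. It exercises the eager InvConvNear; InvConvNearNew returns the same two-tuple through call(). Because __init__ forces det(W) = +1 for the orthogonal init, the log-determinant term comes out at (numerically) zero.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)                     # arbitrary seed for the QR init
    m = InvConvNear(channels=4).cuda()       # n_split=4, orthogonal 4x4 weight
    x = torch.rand(4, 4, 4, device='cuda')   # (batch, channels, time)
    z, logdet = m(x)
    print(z.shape, logdet)  # torch.Size([4, 4, 4]); logdet ~ 0 since det(W) = +1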
InterWeightedBCEWithLogits
import torch from torch import Tensor import torch.nn as nn from typing import Optional from typing import Any import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, pos_weight=None): """Calculate the binary CrossEntropy loss with logits. Args: pred (torch.Tensor): The prediction with shape (N, \\*). label (torch.Tensor): The gt label with shape (N, \\*). weight (torch.Tensor, optional): Element-wise weight of loss with shape (N, ). Defaults to None. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. pos_weight (torch.Tensor, optional): The positive weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ assert pred.dim() == label.dim() if class_weight is not None: N = pred.size()[0] class_weight = class_weight.repeat(N, 1) loss = F.binary_cross_entropy_with_logits(pred, label, weight= class_weight, pos_weight=pos_weight, reduction='none') if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class InterWeightedBCEWithLogits(nn.Module): def __init__(self, reduction: 'str'='mean', loss_weight: 'float'=1.0 ) ->None: super(InterWeightedBCEWithLogits, self).__init__() self.reduction = reduction self.loss_weight = loss_weight self.register_buffer('class_weight', None) def receive_data_dist_info(self, num_pos_neg: 'Tensor') ->None: """Weight for each class is sqrt(n_c / (n_dominant + n_total))""" num_pos = num_pos_neg[0] num_dominant = num_pos.max() class_weight = torch.sqrt(num_pos / (num_dominant + num_pos.sum())) class_weight /= class_weight.sum() self.class_weight = class_weight def forward(self, cls_score: 'Tensor', label: 'Tensor', weight: 'Optional[Tensor]'=None, avg_factor: 'Optional[float]'=None, reduction_override: 'Optional[str]'=None, **kwargs: Any ) ->torch.Tensor: assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. 
reduction) loss_cls = self.loss_weight * binary_cross_entropy(cls_score, label, weight, class_weight=self.class_weight, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import Tensor import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0[grid(1)]( buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, pos_weight=None): """Calculate the binary CrossEntropy loss with logits. Args: pred (torch.Tensor): The prediction with shape (N, \\*). label (torch.Tensor): The gt label with shape (N, \\*). weight (torch.Tensor, optional): Element-wise weight of loss with shape (N, ). Defaults to None. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. 
Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. pos_weight (torch.Tensor, optional): The positive weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ assert pred.dim() == label.dim() if class_weight is not None: N = pred.size()[0] class_weight = class_weight.repeat(N, 1) loss = F.binary_cross_entropy_with_logits(pred, label, weight= class_weight, pos_weight=pos_weight, reduction='none') if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class InterWeightedBCEWithLogitsNew(nn.Module): def __init__(self, reduction: 'str'='mean', loss_weight: 'float'=1.0 ) ->None: super(InterWeightedBCEWithLogitsNew, self).__init__() self.reduction = reduction self.loss_weight = loss_weight self.register_buffer('class_weight', None) def receive_data_dist_info(self, num_pos_neg: 'Tensor') ->None: """Weight for each class is sqrt(n_c / (n_dominant + n_total))""" num_pos = num_pos_neg[0] num_dominant = num_pos.max() class_weight = torch.sqrt(num_pos / (num_dominant + num_pos.sum())) class_weight /= class_weight.sum() self.class_weight = class_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CAMP-eXplain-AI/imba-explain
InterWeightedBCEWithLogits
false
2052
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
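A minimal equivalence sketch for the default path (no target weight, no class_weight, 'mean' reduction), which is the only path the fused kernel above covers; it assumes both class definitions from this record are in scope and CUDA is available.

import torch

if torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device='cuda')    # logits
    label = torch.rand(4, 4, 4, 4, device='cuda')   # soft targets in [0, 1)
    ref = InterWeightedBCEWithLogits()(pred, label)
    fused = InterWeightedBCEWithLogitsNew()(pred, label)
    assert torch.allclose(ref, fused, atol=1e-5)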
BCELoss
import torch import torch.nn as nn import torch.nn.functional as F class BCELoss(nn.Module): """Binary Cross Entropy loss.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.binary_cross_entropy self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_labels: K Args: output (torch.Tensor[N, K]): Output classification. target (torch.Tensor[N, K]): Target classification. target_weight (torch.Tensor[N, K] or torch.Tensor[N]): Weights across different labels. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output, target, reduction='none') if target_weight.dim() == 1: target_weight = target_weight[:, None] loss = (loss * target_weight).mean() else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCELossNew(nn.Module): """Binary Cross Entropy loss.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.binary_cross_entropy self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
BCELoss
false
2053
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
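A small sketch of the clamping behaviour mirrored by the -100.0 literal in the fused kernel above: per the BCELoss docs, F.binary_cross_entropy clamps its log outputs at -100 rather than returning inf at probability 0 or 1. Plain PyTorch only; no assumptions beyond a recent torch.

import torch
import torch.nn.functional as F

p = torch.tensor([0.0, 0.5, 1.0])  # predicted probabilities
t = torch.tensor([1.0, 0.5, 0.0])  # targets
# log(0) is clamped to -100 instead of -inf, matching the
# triton_helpers.maximum(..., -100.0) calls in the kernel above.
print(F.binary_cross_entropy(p, t, reduction='none'))
# tensor([100.0000, 0.6931, 100.0000])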
SoftWingLoss
import math import torch import torch.nn as nn class SoftWingLoss(nn.Module): """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face Alignment' Lin et al. TIP'2021. loss = 1. |x| , if |x| < omega1 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 Args: omega1 (float): The first threshold. omega2 (float): The second threshold. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega1 = omega1 self.omega2 = omega2 self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / self.epsilon) def criterion(self, pred, target): """Criterion of wingloss. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega1, delta, self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, output, target, target_weight=None): """Forward function. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 2.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tl_math.log(tmp8) tmp10 = 20.0 tmp11 = tmp9 * tmp10 tmp12 = -30.188758248682007 tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp5, tmp3, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)]( arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 return buf1, class SoftWingLossNew(nn.Module): """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face Alignment' Lin et al. TIP'2021. loss = 1. |x| , if |x| < omega1 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 Args: omega1 (float): The first threshold. omega2 (float): The second threshold. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega1 = omega1 self.omega2 = omega2 self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / self.epsilon) def criterion(self, pred, target): """Criterion of wingloss. 
Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega1, delta, self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
SoftWingLoss
false
2054
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
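A sketch checking the continuity constant that the fused kernel bakes in as a literal, plus an end-to-end comparison; it assumes both class definitions from this record are in scope and CUDA is available. The target is scaled so |delta| crosses omega1 = 2 and both branches of the piecewise loss get exercised.

import math
import torch

# For the defaults omega1=2, omega2=20, epsilon=0.5:
# B = 2 - 20*ln(1 + 2/0.5) = -30.188758248682007, the literal in the kernel above.
assert abs(2.0 - 20.0 * math.log(1.0 + 2.0 / 0.5) + 30.188758248682007) < 1e-12

if torch.cuda.is_available():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = 5.0 * torch.rand(4, 4, 4, 4, device='cuda')  # push |delta| past omega1
    ref = SoftWingLoss()(out, tgt)
    fused = SoftWingLossNew()(out, tgt)
    assert torch.allclose(ref, fused, atol=1e-4)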
Regression
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class Regression(nn.Module): def __init__(self, input_size, output_size): super(Regression, self).__init__() self.layer1 = nn.Linear(input_size, 24) self.layer2 = nn.Linear(24, 24) self.layer3 = nn.Linear(24, output_size) def forward(self, x): x1 = F.relu(self.layer1(x)) x2 = F.relu(self.layer2(x1)) x3 = self.layer3(x2) return x3 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (24, 4), (4, 1)) assert_size_stride(primals_2, (24,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (24, 24), (24, 1)) assert_size_stride(primals_5, (24,), (1,)) assert_size_stride(primals_6, (4, 24), (24, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf1, primals_2, buf6, 1536, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 24), (24, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(primals_4, (24, 24), (1, 24), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 24), (384, 96, 24, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf3, primals_5, buf5, 1536, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 24), (24, 1), 0), reinterpret_tensor(primals_6, (24, 4), (1, 24), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor( buf3, (64, 24), (24, 1), 0), primals_6, buf5, primals_4, buf6 class RegressionNew(nn.Module): def __init__(self, input_size, output_size): super(RegressionNew, self).__init__() self.layer1 = nn.Linear(input_size, 24) self.layer2 = nn.Linear(24, 24) self.layer3 = nn.Linear(24, output_size) def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_6 = self.layer3.weight primals_7 = self.layer3.bias primals_3 = input_0 output = call([primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
BEOKS/Windows-Machine-Learning
Regression
false
2055
[ "MIT" ]
0
e227909baa5ef604d45afa976dc04598f09d76bd
https://github.com/BEOKS/Windows-Machine-Learning/tree/e227909baa5ef604d45afa976dc04598f09d76bd
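A minimal equivalence sketch, assuming Regression and RegressionNew from this record are in scope and CUDA is available; the fused version reads the same nn.Linear parameters, so copying the state dict should make the two paths agree on the compiled (4, 4, 4, 4) shape.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    ref_net = Regression(input_size=4, output_size=4).cuda()
    fused_net = RegressionNew(input_size=4, output_size=4).cuda()
    fused_net.load_state_dict(ref_net.state_dict())  # identical layer1..3 weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref_net(x), fused_net(x), atol=1e-5)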
L2Norm
import torch import torch.nn as nn class L2Norm(nn.Module): def __init__(self, n_dims, scale=20.0, eps=1e-10): super(L2Norm, self).__init__() self.n_dims = n_dims self.weight = nn.Parameter(torch.Tensor(self.n_dims)) self.eps = eps self.scale = scale def forward(self, x): x_float = x.float() norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps return (self.weight[None, :, None, None].float().expand_as(x_float) * x_float / norm).type_as(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_dims': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = 1e-10 tmp16 = tmp14 + tmp15 tmp17 = tmp2 / tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class L2NormNew(nn.Module): def __init__(self, n_dims, scale=20.0, eps=1e-10): super(L2NormNew, self).__init__() self.n_dims = n_dims self.weight = nn.Parameter(torch.Tensor(self.n_dims)) self.eps = eps self.scale = scale def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
CK-er/mmdet
L2Norm
false
2056
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
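A minimal equivalence sketch, assuming L2Norm and L2NormNew from this record are in scope and CUDA is available. Note that __init__ allocates the weight with torch.Tensor(n_dims) and never initializes it, so the sketch fills it with scale explicitly (detection codebases typically constant-init it that way) to keep the outputs well defined.

import torch

if torch.cuda.is_available():
    ref = L2Norm(n_dims=4).cuda()
    with torch.no_grad():
        ref.weight.fill_(ref.scale)       # avoid uninitialized-memory outputs
    fused = L2NormNew(n_dims=4).cuda()
    fused.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), fused(x), atol=1e-5)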
MPJPELoss
import torch import torch.nn as nn class MPJPELoss(nn.Module): """MPJPE (Mean Per Joint Position Error) loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N,K,D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = torch.mean(torch.norm((output - target) * target_weight, dim=-1)) else: loss = torch.mean(torch.norm(output - target, dim=-1)) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = 64.0 tmp24 = tmp22 / tmp23 tmp25 = 1.0 tmp26 = tmp24 * tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MPJPELossNew(nn.Module): """MPJPE (Mean Per Joint Position Error) loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
MPJPELoss
false
2057
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
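A minimal equivalence sketch, assuming MPJPELoss and MPJPELossNew from this record are in scope and CUDA is available; the fused kernel hard-codes D=4 for the last-dim L2 norm and 64 rows for the mean, so it only serves the compiled (4, 4, 4, 4) shape.

import torch

if torch.cuda.is_available():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    ref = MPJPELoss()(out, tgt)        # mean of per-point L2 norms over the last dim
    fused = MPJPELossNew()(out, tgt)   # one fused norm + mean reduction
    assert torch.allclose(ref, fused, atol=1e-5)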
TransformerNet
import torch import torch.onnx class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample if upsample: self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample) reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = self.upsample_layer(x_in) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class TransformerNet(torch.nn.Module): def __init__(self): super(TransformerNet, self).__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) self.in3 = torch.nn.InstanceNorm2d(128, affine=True) self.res1 = ResidualBlock(128) self.res2 = ResidualBlock(128) self.res3 = ResidualBlock(128) self.res4 = ResidualBlock(128) self.res5 = ResidualBlock(128) self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) self.in4 = torch.nn.InstanceNorm2d(64, affine=True) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, X): y = self.relu(self.in1(self.conv1(X))) y = self.relu(self.in2(self.conv2(y))) y = self.relu(self.in3(self.conv3(y))) y = self.res1(y) y = self.res2(y) y = self.res3(y) y = self.res4(y) y = self.res5(y) y = self.relu(self.in4(self.deconv1(y))) y = self.relu(self.in5(self.deconv2(y))) y = self.deconv3(y) return y def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 62208 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 32 tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. 
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6_tmp[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 32, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 557568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x2 = xindex // 4356 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 64, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 128 tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0 % 128, None, eviction_policy='evict_last') tmp2 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None) tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = tl.broadcast_to(tmp5, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.full([1], 256, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp4 - tmp12 tmp24 = tmp23 * tmp22 tmp25 = tmp24 * tmp0 tmp26 = tmp25 + tmp1 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tl.store(out_ptr0 + x0, tmp0, None) tl.store(out_ptr1 + x0, tmp1, None) tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp22, None) tl.store(out_ptr3 + (r3 + 256 * x0), tmp28, None) tl.store(out_ptr2 + x0, tmp12, None) @triton.jit def triton_poi_fused_reflection_pad2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_9(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, 
[RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 128, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 128 tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp27 = tl.load(in_out_ptr1 + (r3 + 256 * x0), None) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = tl.broadcast_to(tmp4, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.full([1], 256, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp11 tmp18 = 256.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tmp23 * tmp0 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tl.store(out_ptr0 + x0, tmp0, None) tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp3, None) tl.store(in_out_ptr1 + (r3 + 256 * x0), tmp28, None) tl.store(out_ptr3 + x0, tmp22, None) tl.store(out_ptr1 + x0, tmp11, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None) tl.store(out_ptr2 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp15, None) @triton.jit def triton_poi_fused_arange_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_reflection_pad2d_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 34 % 34 x0 = xindex % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = 256.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp11 * tmp17 tmp20 = tmp18 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr6 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp24 = tmp22 + tmp23 tl.store(out_ptr0 + x7, tmp24, None) @triton.jit def triton_poi_fused_arange_17(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_relu_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 66 % 66 x0 = xindex % 66 x2 = xindex // 4356 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2), xmask, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tl.store(out_ptr0 + x5, tmp19, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, 
primals_59, primals_60, primals_61, primals_62, primals_63 ) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64,), (1,)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128,), (1,)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128,), (1,)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128,), (1,)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (128,), (1,)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128,), (1,)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (128,), (1,)) assert_size_stride(primals_40, (128,), (1,)) assert_size_stride(primals_41, (128,), (1,)) assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_43, (128,), (1,)) assert_size_stride(primals_44, (128,), (1,)) assert_size_stride(primals_45, (128,), (1,)) assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_47, (128,), (1,)) assert_size_stride(primals_48, (128,), (1,)) assert_size_stride(primals_49, (128,), (1,)) assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_51, (128,), (1,)) assert_size_stride(primals_52, (128,), (1,)) assert_size_stride(primals_53, (128,), (1,)) assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_55, (64,), (1,)) assert_size_stride(primals_56, (64,), (1,)) assert_size_stride(primals_57, (64,), (1,)) assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_59, (32,), (1,)) assert_size_stride(primals_60, (32,), (1,)) assert_size_stride(primals_61, (32,), (1,)) assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1)) assert_size_stride(primals_63, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(62208)](primals_1, buf0, 62208, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = 
extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32 ) buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch .float32) buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf6 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](buf2 , buf8, primals_3, buf5, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_3 buf3 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_4, buf3, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_4 buf4 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_5, buf4, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_3[grid(557568)](buf2, buf5, buf8, buf3, buf4, buf9, 557568, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf11 = buf10 del buf10 buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. float32) buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf15 triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)]( buf11, buf17, primals_7, buf14, 256, 1024, num_warps=8, num_stages=1) del primals_7 buf12 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_8, buf12, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_8 buf13 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_9, buf13, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_6[grid(295936)](buf11, buf14, buf17, buf12, buf13, buf18, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf21 = empty_strided_cuda((512,), (1,), torch.float32) buf22 = empty_strided_cuda((512,), (1,), torch.float32) buf20 = buf19 del buf19 buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf24 buf27 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7[ grid(512)](buf20, buf26, primals_12, primals_13, primals_11, buf21, buf22, buf23, buf27, 512, 256, num_warps=2, num_stages=1) del primals_11 del primals_12 del primals_13 buf28 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf27, buf28, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 16, 16), (32768, 256, 16, 1)) buf30 = buf29 del buf29 buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. float32) buf34 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf36 = reinterpret_tensor(buf34, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf34 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf30, buf36, primals_15, buf33, 512, 256, num_warps=2, num_stages=1) del primals_15 buf31 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_16, buf31, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_16 buf32 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_17, buf32, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf30, buf33, buf36, buf31, buf32, buf37, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1)) buf40 = empty_strided_cuda((512,), (1,), torch.float32) buf39 = buf38 del buf38 buf41 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf45 = buf27 del buf27 buf44 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf39, buf45, primals_20, primals_19, primals_21, buf40, buf41, buf44, 512, 256, num_warps=2, num_stages=1) del primals_19 del primals_20 del primals_21 buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf45, buf46, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1)) buf48 = buf47 del buf47 buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf52 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf48, buf54, primals_23, buf51, 512, 256, num_warps=2, num_stages=1) del primals_23 buf49 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_24, buf49, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_24 buf50 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_25, buf50, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf55 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf48, buf51, buf54, buf49, buf50, buf55, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf55, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 128, 16, 16), (32768, 256, 16, 1)) buf58 = empty_strided_cuda((512,), (1,), torch.float32) buf57 = buf56 del buf56 buf59 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf63 = buf45 del buf45 buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf57, buf63, primals_28, primals_27, primals_29, buf58, buf59, buf62, 512, 256, num_warps=2, num_stages=1) del primals_27 del primals_28 del primals_29 buf64 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf63, buf64, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf65 = extern_kernels.convolution(buf64, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 128, 16, 16), (32768, 256, 16, 1)) buf66 = buf65 del buf65 buf69 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf72 = reinterpret_tensor(buf70, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf70 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf66, buf72, primals_31, buf69, 512, 256, num_warps=2, num_stages=1) del primals_31 buf67 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_32, buf67, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_32 buf68 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_33, buf68, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_33 buf73 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf66, buf69, buf72, buf67, buf68, buf73, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf73, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf74, (4, 128, 16, 16), (32768, 256, 16, 1)) buf76 = empty_strided_cuda((512,), (1,), torch.float32) buf75 = buf74 del buf74 buf77 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf81 = buf63 del buf63 buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf75, buf81, primals_36, primals_35, primals_37, buf76, buf77, buf80, 512, 256, num_warps=2, num_stages=1) del primals_35 del primals_36 del primals_37 buf82 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf81, buf82, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf83 = extern_kernels.convolution(buf82, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = buf83 del buf83 buf87 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf88 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf90 = reinterpret_tensor(buf88, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf88 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf84, buf90, primals_39, buf87, 512, 256, num_warps=2, num_stages=1) del primals_39 buf85 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_40, buf85, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_40 buf86 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_41, buf86, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_41 buf91 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf84, buf87, buf90, buf85, buf86, buf91, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf92 = extern_kernels.convolution(buf91, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf92, (4, 128, 16, 16), (32768, 256, 16, 1)) buf94 = empty_strided_cuda((512,), (1,), torch.float32) buf93 = buf92 del buf92 buf95 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf99 = buf81 del buf81 buf98 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf93, buf99, primals_44, primals_43, primals_45, buf94, buf95, buf98, 512, 256, num_warps=2, num_stages=1) del primals_43 del primals_44 del primals_45 buf100 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf99, buf100, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf101 = extern_kernels.convolution(buf100, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf101, (4, 128, 16, 16), (32768, 256, 16, 1)) buf102 = buf101 del buf101 buf105 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf106 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf108 = reinterpret_tensor(buf106, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf106 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf102, buf108, primals_47, buf105, 512, 256, num_warps=2, num_stages=1) del primals_47 buf103 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_48, buf103, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_48 buf104 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_49, buf104, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_49 buf109 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf102, buf105, buf108, buf103, buf104, buf109, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf110 = extern_kernels.convolution(buf109, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf110, (4, 128, 16, 16), (32768, 256, 16, 1)) buf111 = buf110 del buf110 buf113 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf114 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf116 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_13[grid(512)]( buf111, primals_51, buf113, buf114, buf116, 512, 256, num_warps =2, num_stages=1) del primals_51 buf112 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_52, buf112, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_52 buf117 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_arange_14[grid(32)](buf117, 32, XBLOCK=32, num_warps=1, num_stages=1) buf118 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_15[grid(32)](buf118, 32, XBLOCK=32, num_warps=1, num_stages=1) buf119 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused__unsafe_index_add_reflection_pad2d_16[grid(591872)]( buf118, buf111, buf113, buf114, buf112, primals_53, buf99, buf119, 591872, XBLOCK=512, num_warps=8, num_stages=1) del buf114 del buf99 del primals_53 buf120 = extern_kernels.convolution(buf119, primals_54, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf121 = buf120 del buf120 buf124 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. 
float32) buf125 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf127 = reinterpret_tensor(buf125, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf125 triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)]( buf121, buf127, primals_55, buf124, 256, 1024, num_warps=8, num_stages=1) del primals_55 buf122 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_56, buf122, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_56 buf123 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_57, buf123, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_57 buf128 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_17[grid(64)](buf128, 64, XBLOCK=64, num_warps=1, num_stages=1) buf129 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_18[grid(64)](buf129, 64, XBLOCK=64, num_warps=1, num_stages=1) buf130 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_reflection_pad2d_relu_19[grid(1115136)]( buf129, buf121, buf124, buf127, buf122, buf123, buf130, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) buf131 = extern_kernels.convolution(buf130, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf132 = buf131 del buf131 buf135 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch. float32) buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32) buf138 = reinterpret_tensor(buf136, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf136 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)]( buf132, buf138, primals_59, buf135, 128, 4096, XBLOCK=1, RBLOCK =2048, num_warps=16, num_stages=1) del primals_59 buf133 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_60, buf133, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_60 buf134 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_61, buf134, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_61 buf139 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_20[grid(663552)](buf132, buf135, buf138, buf133, buf134, buf139, 663552, XBLOCK=1024, num_warps=4, num_stages=1) buf140 = extern_kernels.convolution(buf139, primals_62, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf140, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf141 = buf140 del buf140 triton_poi_fused_convolution_21[grid(49152)](buf141, primals_63, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_63 return (buf141, primals_2, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, primals_34, primals_38, primals_42, primals_46, primals_50, primals_54, primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22, buf23, buf26, buf28, buf30, buf31, buf32, buf33, buf36, buf37, buf39, buf40, reinterpret_tensor(buf44, (512,), (1,), 0), buf46, buf48, buf49, buf50, buf51, buf54, buf55, buf57, buf58, reinterpret_tensor(buf62, (512,), (1,), 0), buf64, buf66, buf67, buf68, buf69, buf72, buf73, buf75, buf76, reinterpret_tensor(buf80, (512,), (1,), 0), buf82, buf84, buf85, buf86, 
buf87, buf90, buf91, buf93, buf94, reinterpret_tensor(buf98, (512,), (1,), 0), buf100, buf102, buf103, buf104, buf105, buf108, buf109, buf111, buf112, reinterpret_tensor(buf116, (512,), (1,), 0), buf117, buf118, buf119, buf121, buf122, buf123, buf124, buf127, buf128, buf129, buf130, buf132, buf133, buf134, buf135, buf138, buf139, reinterpret_tensor( buf113, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor( buf95, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf77, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf59, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf41, (1, 512, 1, 1), (512, 1, 1, 1), 0)) class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. 
ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample if upsample: self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample) reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = self.upsample_layer(x_in) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class TransformerNetNew(torch.nn.Module): def __init__(self): super(TransformerNetNew, self).__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) self.in3 = torch.nn.InstanceNorm2d(128, affine=True) self.res1 = ResidualBlock(128) self.res2 = ResidualBlock(128) self.res3 = ResidualBlock(128) self.res4 = ResidualBlock(128) self.res5 = ResidualBlock(128) self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) self.in4 = torch.nn.InstanceNorm2d(64, affine=True) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.in1.weight primals_5 = self.in1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.in2.weight primals_9 = self.in2.bias primals_10 = self.conv3.conv2d.weight primals_11 = self.conv3.conv2d.bias primals_12 = self.in3.weight primals_13 = self.in3.bias primals_14 = self.res1.conv1.conv2d.weight primals_15 = self.res1.conv1.conv2d.bias primals_16 = self.res1.in1.weight primals_17 = self.res1.in1.bias primals_18 = self.res1.conv2.conv2d.weight primals_19 = self.res1.conv2.conv2d.bias primals_20 = self.res1.in2.weight primals_21 = self.res1.in2.bias primals_22 = self.res2.conv1.conv2d.weight primals_23 = self.res2.conv1.conv2d.bias primals_24 = self.res2.in1.weight primals_25 = self.res2.in1.bias primals_26 = self.res2.conv2.conv2d.weight primals_27 = self.res2.conv2.conv2d.bias primals_28 = self.res2.in2.weight primals_29 = self.res2.in2.bias primals_30 = self.res3.conv1.conv2d.weight primals_31 = self.res3.conv1.conv2d.bias primals_32 = self.res3.in1.weight primals_33 = self.res3.in1.bias primals_34 = self.res3.conv2.conv2d.weight primals_35 = self.res3.conv2.conv2d.bias primals_36 = self.res3.in2.weight primals_37 = self.res3.in2.bias primals_38 = self.res4.conv1.conv2d.weight primals_39 = self.res4.conv1.conv2d.bias primals_40 = self.res4.in1.weight primals_41 = self.res4.in1.bias primals_42 = self.res4.conv2.conv2d.weight primals_43 = self.res4.conv2.conv2d.bias primals_44 = self.res4.in2.weight primals_45 = self.res4.in2.bias primals_46 = self.res5.conv1.conv2d.weight primals_47 = self.res5.conv1.conv2d.bias primals_48 = self.res5.in1.weight primals_49 = self.res5.in1.bias primals_50 = self.res5.conv2.conv2d.weight primals_51 = self.res5.conv2.conv2d.bias primals_52 = self.res5.in2.weight primals_53 = self.res5.in2.bias primals_54 = self.deconv1.conv2d.weight primals_55 = self.deconv1.conv2d.bias 
primals_56 = self.in4.weight primals_57 = self.in4.bias primals_58 = self.deconv2.conv2d.weight primals_59 = self.deconv2.conv2d.bias primals_60 = self.in5.weight primals_61 = self.in5.bias primals_62 = self.deconv3.conv2d.weight primals_63 = self.deconv3.conv2d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63]) return output[0]
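# Hedged usage sketch (added for illustration; not part of the original
# record). It assumes a CUDA device is available, since the generated call()
# pins device 0, and it only checks that the compiled style-transfer network
# runs end to end and preserves the (B, 3, 64, 64) image shape that the
# stride assertions above require.
if __name__ == '__main__' and torch.cuda.is_available():
    torch.manual_seed(0)
    net = TransformerNetNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')  # contiguous, matching assert_size_stride
    with torch.no_grad():
        y = net(x)
    assert y.shape == (4, 3, 64, 64)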
Ali-ry/azureml-examples
TransformerNet
false
2058
[ "MIT" ]
0
817ae89d2766dcafd70937a22cb3a80f100a2906
https://github.com/Ali-ry/azureml-examples/tree/817ae89d2766dcafd70937a22cb3a80f100a2906
DenseGCNConv
import math import torch from torch.nn import Parameter import torch.utils.data def glorot(tensor): if tensor is not None: stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1))) tensor.data.uniform_(-stdv, stdv) def zeros(tensor): if tensor is not None: tensor.data.fill_(0) class DenseGCNConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GCNConv`. """ def __init__(self, in_channels, out_channels, improved=False, bias=True): super(DenseGCNConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.improved = improved self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): glorot(self.weight) zeros(self.bias) def forward(self, x, adj, mask=None, add_loop=True): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) add_loop (bool, optional): If set to :obj:`False`, the layer will not automatically add self-loops to the adjacency matrices. (default: :obj:`True`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() if add_loop: adj = adj.clone() idx = torch.arange(N, dtype=torch.long, device=adj.device) adj[:, idx, idx] = 1 if not self.improved else 2 out = torch.matmul(x, self.weight) deg_inv_sqrt = adj.sum(dim=-1).clamp(min=1).pow(-0.5) adj = deg_inv_sqrt.unsqueeze(-1) * adj * deg_inv_sqrt.unsqueeze(-2) out = torch.matmul(adj, out) if self.bias is not None: out = out + self.bias if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
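# Hedged worked example (added for illustration; not from the source record):
# on a single 2-node graph this traces the normalisation that forward()
# applies above - adding self-loops (add_loop=True, improved=False), clamping
# degrees at 1, and forming deg^(-1/2) * (A + I) * deg^(-1/2) - so the
# propagation is out = D^-1/2 (A + I) D^-1/2 X W (+ bias).
if __name__ == '__main__':
    A = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
    A_loop = A + torch.eye(2)                 # self-loops, value 1 since improved=False
    d = A_loop.sum(dim=-1).clamp(min=1)       # node degrees, clamped like forward()
    d_inv_sqrt = d.pow(-0.5)
    A_norm = d_inv_sqrt.unsqueeze(-1) * A_loop * d_inv_sqrt.unsqueeze(-2)
    # every entry is 1/sqrt(2) * 1 * 1/sqrt(2) = 0.5 on this fully connected pair
    assert torch.allclose(A_norm, torch.full((2, 2), 0.5))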
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 tmp0 = 1.0 tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + x4, xmask) tmp13 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = -0.5 tmp10 = libdevice.pow(tmp8, tmp9) tmp12 = tmp10 * tmp11 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp20 = triton_helpers.maximum(tmp19, tmp7) tmp21 = libdevice.pow(tmp20, tmp9) tmp22 = tmp12 * tmp21 tl.store(out_ptr0 + x4, tmp22, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) 
assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](buf0, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf5) del buf2 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf6, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) def glorot(tensor): if tensor is not None: stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1))) tensor.data.uniform_(-stdv, stdv) def zeros(tensor): if tensor is not None: tensor.data.fill_(0) class DenseGCNConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GCNConv`. """ def __init__(self, in_channels, out_channels, improved=False, bias=True): super(DenseGCNConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.improved = improved self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): glorot(self.weight) zeros(self.bias) def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
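# Hedged usage sketch (added for illustration; not part of the original
# record). It assumes a CUDA device, since call() pins device 0, and it
# exercises the compiled DenseGCNConvNew on the same shapes that get_inputs()
# produces - a (4, 4, 4, 4) feature tensor and a broadcastable (4, 4, 4)
# adjacency tensor - checking only that the output layout is preserved.
if __name__ == '__main__' and torch.cuda.is_available():
    torch.manual_seed(0)
    conv = DenseGCNConvNew(in_channels=4, out_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    adj = torch.rand(4, 4, 4, device='cuda')
    out = conv(x, adj)
    assert out.shape == (4, 4, 4, 4)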
CFF-Dream/pytorch_geometric
DenseGCNConv
false
2059
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
AsymmetricLoss
import torch import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def asymmetric_loss(pred, target, weight=None, gamma_pos=1.0, gamma_neg=4.0, clip=0.05, reduction='mean', avg_factor=None): """asymmetric loss. Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for details. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ). Defaults to None. gamma_pos (float): positive focusing parameter. Defaults to 0.0. gamma_neg (float): Negative focusing parameter. We usually set gamma_neg > gamma_pos. Defaults to 4.0. clip (float, optional): Probability margin. Defaults to 0.05. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. Returns: torch.Tensor: Loss. """ assert pred.shape == target.shape, 'pred and target should be in the same shape.' eps = 1e-08 pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if clip and clip > 0: pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target ) + pred_sigmoid * target else: pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 - target)) loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class AsymmetricLoss(nn.Module): """asymmetric loss. Args: gamma_pos (float): positive focusing parameter. Defaults to 0.0. gamma_neg (float): Negative focusing parameter. We usually set gamma_neg > gamma_pos. Defaults to 4.0. clip (float, optional): Probability margin. Defaults to 0.05. reduction (str): The method used to reduce the loss into a scalar. loss_weight (float): Weight of loss. Defaults to 1.0. 
""" def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction= 'mean', loss_weight=1.0): super(AsymmetricLoss, self).__init__() self.gamma_pos = gamma_pos self.gamma_neg = gamma_neg self.clip = clip self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """asymmetric loss.""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_cls = self.loss_weight * asymmetric_loss(pred, target, weight, gamma_pos=self.gamma_pos, gamma_neg=self.gamma_neg, clip=self. clip, reduction=reduction, avg_factor=avg_factor) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = 0.05 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.minimum(tmp5, tmp2) tmp8 = tmp2 - tmp7 tmp9 = tmp6 * tmp8 tmp10 = tmp1 * tmp7 tmp11 = tmp9 + tmp10 tmp12 = 1e-08 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tl_math.log(tmp13) tmp15 = -tmp14 tmp16 = tmp2 - tmp11 tmp17 = 0.0 tmp18 = tmp7 * tmp17 tmp19 = 4.0 tmp20 = tmp8 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.pow(tmp16, tmp21) tmp23 = tmp15 * tmp22 tmp24 = tl.broadcast_to(tmp23, [RBLOCK]) tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0)) tmp27 = 256.0 tmp28 = tmp26 / tmp27 tmp29 = tmp28 * tmp2 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0[grid(1) ](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def asymmetric_loss(pred, target, weight=None, gamma_pos=1.0, gamma_neg=4.0, clip=0.05, reduction='mean', avg_factor=None): """asymmetric loss. Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for details. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). 
weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ). Defaults to None. gamma_pos (float): positive focusing parameter. Defaults to 0.0. gamma_neg (float): Negative focusing parameter. We usually set gamma_neg > gamma_pos. Defaults to 4.0. clip (float, optional): Probability margin. Defaults to 0.05. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. Returns: torch.Tensor: Loss. """ assert pred.shape == target.shape, 'pred and target should be in the same shape.' eps = 1e-08 pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if clip and clip > 0: pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target ) + pred_sigmoid * target else: pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 - target)) loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class AsymmetricLossNew(nn.Module): """asymmetric loss. Args: gamma_pos (float): positive focusing parameter. Defaults to 0.0. gamma_neg (float): Negative focusing parameter. We usually set gamma_neg > gamma_pos. Defaults to 4.0. clip (float, optional): Probability margin. Defaults to 0.05. reduction (str): The method used to reduce the loss into a scalar. loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction= 'mean', loss_weight=1.0): super(AsymmetricLossNew, self).__init__() self.gamma_pos = gamma_pos self.gamma_neg = gamma_neg self.clip = clip self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
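# Hedged parity sketch (added for illustration; not part of the source
# record): it assumes a CUDA device and compares the fused kernel against the
# eager asymmetric_loss helper kept above, using the class defaults
# (gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction='mean') that the Triton
# kernel has baked in as the constants 0.0, 4.0, 0.05 and the 1/256 mean.
# The tolerance is an illustrative choice, not taken from the source.
if __name__ == '__main__' and torch.cuda.is_available():
    torch.manual_seed(0)
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    fused = AsymmetricLossNew()(pred, target)
    eager = asymmetric_loss(pred, target, gamma_pos=0.0, gamma_neg=4.0,
        clip=0.05, reduction='mean')
    assert torch.allclose(fused, eager, atol=1e-05)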
CAMP-eXplain-AI/imba-explain
AsymmetricLoss
false
2060
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
ConvBlockLNEDense
import torch from torch import nn from torch.nn import init as init class ConvBlockLNEDense(nn.Module): def __init__(self, n_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(2 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv3 = nn.Conv2d(3 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv4 = nn.Conv2d(4 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.GroupNorm(1, n_ch, affine=True) self.norm2 = nn.GroupNorm(1, n_ch, affine=True) self.norm3 = nn.GroupNorm(1, n_ch, affine=True) def forward(self, x, g=None, b=None): x1 = self.conv1(x) x1 = self.act(x1) x1 = self.norm1(x1) x2 = torch.cat([x1, x], dim=1) x2 = self.conv2(x2) x2 = self.act(x2) x2 = self.norm2(x2) x3 = torch.cat([x2, x1, x], dim=1) x3 = self.conv3(x3) x3 = self.act(x3) x3 = self.norm3(x3) x4 = torch.cat([x3, x2, x1, x], dim=1) out = self.conv4(x4) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_ch': 4}]
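# Hedged usage sketch (added for illustration; not part of the original
# record): it exercises the eager dense block on the shapes produced by
# get_inputs() above and checks that the four circularly padded convolutions
# preserve the (B, n_ch, H, W) layout. Each conv_k consumes the channel-wise
# concatenation of all earlier activations, which is why conv4 takes
# 4 * n_ch input channels.
if __name__ == '__main__':
    torch.manual_seed(0)
    block = ConvBlockLNEDense(n_ch=4)
    x = torch.rand(4, 4, 4, 4)
    out = block(x)
    assert out.shape == (4, 4, 4, 4)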
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = 
tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused_cat_convolution_native_group_norm_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r2 = rindex // 16 tmp0 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 64, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 64.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp2, xmask) tl.store(out_ptr2 + (r3 + 128 * x0), tmp31, xmask) tl.store(out_ptr3 + (r3 + 192 * x0), tmp31, xmask) tl.store(out_ptr4 + (r3 + 256 * x0), tmp31, xmask) tl.store(out_ptr5 + x0, tmp26, xmask) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 128 * x1), tmp0, xmask) tl.store(out_ptr1 + (x0 + 192 * x1), tmp0, xmask) tl.store(out_ptr2 + (x0 + 256 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_copy_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused_cat_convolution_native_group_norm_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, 
XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r2 = rindex // 16 tmp0 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 64, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 64.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp2, xmask) tl.store(out_ptr2 + (r3 + 192 * x0), tmp31, xmask) tl.store(out_ptr3 + (r3 + 256 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp26, xmask) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_copy_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 
= tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused_convolution_native_group_norm_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r2 = rindex // 16 tmp0 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 64, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 64.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp2, xmask) tl.store(out_ptr2 + (r3 + 256 * x0), tmp31, xmask) tl.store(out_ptr3 + x0, tmp26, xmask) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_copy_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 
tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, 
primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(576)](primals_3, buf0, buf1, 576, XBLOCK=256, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused_1[grid(576)](buf1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf3 = extern_kernels.convolution(buf2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) buf8 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 16, 4, 1), 0) buf24 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) buf22 = reinterpret_tensor(buf24, (4, 4, 4, 4), (192, 16, 4, 1), 64) buf38 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) buf36 = reinterpret_tensor(buf38, (4, 4, 4, 4), (256, 16, 4, 1), 128) buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_cat_convolution_native_group_norm_2[grid(4)](buf4, primals_2, primals_4, primals_5, buf5, buf8, buf22, buf36, buf9, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_5 buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 16, 4, 1), 64) buf23 = reinterpret_tensor(buf24, (4, 4, 4, 4), (192, 16, 4, 1), 128) buf37 = reinterpret_tensor(buf38, (4, 4, 4, 4), (256, 16, 4, 1), 192) triton_poi_fused_cat_3[grid(256)](primals_3, buf10, buf23, buf37, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf12 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32 ) buf13 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32 ) triton_poi_fused_copy_4[grid(1152)](buf11, buf12, buf13, 1152, XBLOCK=256, num_warps=4, num_stages=1) del buf10 del buf11 del buf8 buf14 = buf12 del buf12 triton_poi_fused_5[grid(1152)](buf13, buf14, 1152, XBLOCK=256, num_warps=4, num_stages=1) del buf13 buf15 = extern_kernels.convolution(buf14, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = buf15 del buf15 buf17 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf20 = reinterpret_tensor(buf24, (4, 4, 4, 4), (192, 16, 4, 1), 0) buf35 = reinterpret_tensor(buf38, (4, 4, 4, 4), (256, 16, 4, 1), 64) buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_cat_convolution_native_group_norm_6[grid(4)](buf16, primals_7, primals_8, primals_9, buf17, buf20, buf35, buf21, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 del primals_9 buf25 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch. float32) buf26 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch. float32) triton_poi_fused_copy_7[grid(1728)](buf24, buf25, buf26, 1728, XBLOCK=128, num_warps=4, num_stages=1) del buf20 del buf22 del buf23 del buf24 buf27 = buf25 del buf25 triton_poi_fused_8[grid(1728)](buf26, buf27, 1728, XBLOCK=256, num_warps=4, num_stages=1) del buf26 buf28 = extern_kernels.convolution(buf27, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1)) buf29 = buf28 del buf28 buf30 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf34 = reinterpret_tensor(buf38, (4, 4, 4, 4), (256, 16, 4, 1), 0) buf33 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_convolution_native_group_norm_9[grid(4)](buf29, primals_11, primals_12, primals_13, buf30, buf34, buf33, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_11 del primals_13 buf39 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch. float32) buf40 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch. 
float32) triton_poi_fused_copy_10[grid(2304)](buf38, buf39, buf40, 2304, XBLOCK=128, num_warps=4, num_stages=1) del buf34 del buf35 del buf36 del buf37 del buf38 buf41 = buf39 del buf39 triton_poi_fused_11[grid(2304)](buf40, buf41, 2304, XBLOCK=256, num_warps=4, num_stages=1) del buf40 buf42 = extern_kernels.convolution(buf41, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 4, 4, 4), (64, 16, 4, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_12[grid(256)](buf43, primals_15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 return (buf43, primals_1, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf2, buf4, reinterpret_tensor(buf5, (4, 1), (1, 1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf14, buf16, reinterpret_tensor(buf17, (4, 1), (1, 1), 0), reinterpret_tensor(buf21, (4, 1), (1, 1), 0), buf27, buf29, reinterpret_tensor(buf30, (4, 1), (1, 1), 0), reinterpret_tensor( buf33, (4, 1), (1, 1), 0), buf41) class ConvBlockLNEDenseNew(nn.Module): def __init__(self, n_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(2 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv3 = nn.Conv2d(3 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv4 = nn.Conv2d(4 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.GroupNorm(1, n_ch, affine=True) self.norm2 = nn.GroupNorm(1, n_ch, affine=True) self.norm3 = nn.GroupNorm(1, n_ch, affine=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_4 = self.conv2.bias primals_10 = self.conv3.weight primals_5 = self.conv3.bias primals_14 = self.conv4.weight primals_7 = self.conv4.bias primals_8 = self.norm1.weight primals_9 = self.norm1.bias primals_11 = self.norm2.weight primals_12 = self.norm2.bias primals_13 = self.norm3.weight primals_15 = self.norm3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
BaekduChoi/Halftoning_v2
ConvBlockLNEDense
false
2,061
[ "BSD-3-Clause" ]
0
fdb7040e1a4044f23ef9c92757bbb90c23685afe
https://github.com/BaekduChoi/Halftoning_v2/tree/fdb7040e1a4044f23ef9c92757bbb90c23685afe
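A hedged aside, not part of the record: one way to sanity-check this pair is to load the eager module's weights into the compiled wrapper and compare outputs on the record's (4, 4, 4, 4) input. The sketch below assumes a CUDA device and that the original ConvBlockLNEDense definition from the first half of this record is in scope alongside ConvBlockLNEDenseNew, sharing the (n_ch, act, ksize) signature mirrored by the wrapper; both classes expose the same submodule names (conv1..conv4, norm1..norm3), so the state_dict transfers directly.

import torch

# Build the eager reference and the Triton-compiled wrapper with
# identical parameters (the New class mirrors the attribute layout).
ref = ConvBlockLNEDense(n_ch=4).cuda()
opt = ConvBlockLNEDenseNew(n_ch=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    out_ref = ref(x)  # eager: circular-pad convs + GroupNorm + dense concat
    out_opt = opt(x)  # compiled: fused pad/copy, conv, and group-norm kernels
print(torch.allclose(out_ref, out_opt, atol=1e-5))  # expected: True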
WingLoss
import math import torch import torch.nn as nn class WingLoss(nn.Module): """Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks' Feng et al. CVPR'2018. Args: omega (float): Also referred to as width. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega = omega self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) def criterion(self, pred, target): """Criterion of wingloss. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N,K,D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 10.0 tmp5 = tmp3 < tmp4 tmp6 = 0.5 tmp7 = tmp3 * tmp6 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp10 * tmp4 tmp12 = -7.91759469228055 tmp13 = tmp3 - tmp12 tmp14 = tl.where(tmp5, tmp11, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)]( arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 return buf1, class WingLossNew(nn.Module): """Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks' Feng et al. CVPR'2018. Args: omega (float): Also referred to as width. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega = omega self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) def criterion(self, pred, target): """Criterion of wingloss. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. 
""" delta = (target - pred).abs() losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
WingLoss
false
2,062
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
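One hedged note on the fused kernel above: the literal -7.91759469228055 is not arbitrary. It is the module's precomputed C = omega * (1 - log(1 + omega / epsilon)), folded in at trace time for the defaults omega=10, epsilon=2; the division by epsilon is likewise folded into the multiply by 0.5 (tmp7 = tmp3 * 0.5). A quick check of the constant:

import math

# Verify the constant folded into triton_per_fused_..._0 above.
omega, epsilon = 10.0, 2.0
C = omega * (1.0 - math.log(1.0 + omega / epsilon))
print(C)  # ~ -7.91759469228055, matching tmp12 in the kernel
# The "large delta" branch loss = delta - C appears as
# tmp13 = tmp3 - tmp12, i.e. delta + 7.9176 for these defaults.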
L1Loss
import functools import torch import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def l1_loss(pred, target): assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss class L1Loss(nn.Module): def __init__(self, reduction='mean', loss_weight=1.0): super(L1Loss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_bbox = self.loss_weight * l1_loss(pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_bbox def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) 
>>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def l1_loss(pred, target): assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss class L1LossNew(nn.Module): def __init__(self, reduction='mean', loss_weight=1.0): super(L1LossNew, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CK-er/mmdet
L1Loss
false
2,063
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
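A minimal usage sketch, not from the record: with the default reduction='mean', no weight, and loss_weight=1.0, the single fused kernel reduces to mean(|pred - target|), so the scalar should match F.l1_loss. Note that the generated kernel hard-codes the 256-element problem size from get_inputs (the 256.0 divisor), so this compiled artifact is specialized to (4, 4, 4, 4) inputs. Assumes a CUDA device and the definitions above in scope.

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')

# L1LossNew routes straight into the fused abs/sub/mean/mul kernel.
loss = L1LossNew()(pred, target)
print(torch.allclose(loss, F.l1_loss(pred, target)))  # expected: True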
SmoothNetResBlock
import torch import torch.nn as nn class SmoothNetResBlock(nn.Module): """Residual block module used in SmoothNet. Args: in_channels (int): Input channel number. hidden_channels (int): The hidden feature channel number. dropout (float): Dropout probability. Default: 0.5 Shape: Input: (*, in_channels) Output: (*, in_channels) """ def __init__(self, in_channels, hidden_channels, dropout=0.5): super().__init__() self.linear1 = nn.Linear(in_channels, hidden_channels) self.linear2 = nn.Linear(hidden_channels, in_channels) self.lrelu = nn.LeakyReLU(0.2, inplace=True) self.dropout = nn.Dropout(p=dropout, inplace=True) def forward(self, x): identity = x x = self.linear1(x) x = self.dropout(x) x = self.lrelu(x) x = self.linear2(x) x = self.dropout(x) x = self.lrelu(x) out = x + identity return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'hidden_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr0 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_backward_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tmp7 + tmp8 tmp10 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(256)](buf1, primals_3, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_backward_2[grid(256)](buf3, primals_5, primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del 
buf3 del primals_5 return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf2, buf5, primals_4, buf6 class SmoothNetResBlockNew(nn.Module): """Residual block module used in SmoothNet. Args: in_channels (int): Input channel number. hidden_channels (int): The hidden feature channel number. dropout (float): Dropout probability. Default: 0.5 Shape: Input: (*, in_channels) Output: (*, in_channels) """ def __init__(self, in_channels, hidden_channels, dropout=0.5): super().__init__() self.linear1 = nn.Linear(in_channels, hidden_channels) self.linear2 = nn.Linear(hidden_channels, in_channels) self.lrelu = nn.LeakyReLU(0.2, inplace=True) self.dropout = nn.Dropout(p=dropout, inplace=True) def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ALISCIFP/mmpose
SmoothNetResBlock
false
2,064
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
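One hedged observation about this record: the compiled call contains no dropout or RNG kernel, i.e. the trace treats nn.Dropout as the identity (its inference behavior), so an eager-vs-compiled comparison should put the reference module in eval() mode. A minimal sketch assuming a CUDA device; the two classes share parameter names, so the state_dict transfers directly.

import torch

ref = SmoothNetResBlock(in_channels=4, hidden_channels=4).cuda().eval()
opt = SmoothNetResBlockNew(in_channels=4, hidden_channels=4).cuda()
opt.load_state_dict(ref.state_dict())  # identical linear1/linear2 names

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # eval() makes the eager dropout a no-op, matching the traced graph.
    print(torch.allclose(ref(x), opt(x), atol=1e-5))  # expected: True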
SmoothL1Loss
import functools import torch import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta ) return loss class SmoothL1Loss(nn.Module): def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1Loss, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_bbox = self.loss_weight * smooth_l1_loss(pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, ** kwargs) return loss_bbox def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_div_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 0.5 tmp7 = tmp3 * tmp6 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp6 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_div_lt_mean_mul_sub_where_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. 
:Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta ) return loss class SmoothL1LossNew(nn.Module): def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1LossNew, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CK-er/mmdet
SmoothL1Loss
false
2,065
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
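A hedged note on the kernel above: with the default beta=1.0, inductor folds 0.5 * diff * diff / beta into (diff * 0.5) * diff * 1.0 (tmp7..tmp9) and diff - 0.5 * beta into diff - 0.5 (tmp10), followed by the hard-coded 256-element mean and the loss_weight=1.0 multiply. The result should therefore agree with torch's builtin; a sketch assuming PyTorch >= 1.7 (for the beta keyword) and a CUDA device:

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')

loss = SmoothL1LossNew()(pred, target)  # default beta=1.0, mean reduction
print(torch.allclose(loss, F.smooth_l1_loss(pred, target, beta=1.0)))
# expected: True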
CrossEntropyLoss
import torch import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, pos_weight=None): """Calculate the binary CrossEntropy loss with logits. Args: pred (torch.Tensor): The prediction with shape (N, \\*). label (torch.Tensor): The gt label with shape (N, \\*). weight (torch.Tensor, optional): Element-wise weight of loss with shape (N, ). Defaults to None. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. pos_weight (torch.Tensor, optional): The positive weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ assert pred.dim() == label.dim() if class_weight is not None: N = pred.size()[0] class_weight = class_weight.repeat(N, 1) loss = F.binary_cross_entropy_with_logits(pred, label, weight= class_weight, pos_weight=pos_weight, reduction='none') if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor= None, class_weight=None): """Calculate the CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. label (torch.Tensor): The gt label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str): The method used to reduce the loss. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') if weight is not None: weight = weight.float() loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss def soft_cross_entropy(pred, label, weight=None, reduction='mean', class_weight=None, avg_factor=None): """Calculate the Soft CrossEntropy loss. 
The label can be float. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. label (torch.Tensor): The gt label of the prediction with shape (N, C). When using "mixup", the label can be float. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str): The method used to reduce the loss. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ loss = -label * F.log_softmax(pred, dim=-1) if class_weight is not None: loss *= class_weight loss = loss.sum(dim=-1) if weight is not None: weight = weight.float() loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class CrossEntropyLoss(nn.Module): """Cross entropy loss. Args: use_sigmoid (bool): Whether the prediction uses sigmoid of softmax. Defaults to False. use_soft (bool): Whether to use the soft version of CrossEntropyLoss. Defaults to False. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". Defaults to 'mean'. loss_weight (float): Weight of the loss. Defaults to 1.0. class_weight (List[float], optional): The weight for each class with shape (C), C is the number of classes. Default None. pos_weight (List[float], optional): The positive weight for each class with shape (C), C is the number of classes. Only enabled in BCE loss when ``use_sigmoid`` is True. Default None. """ def __init__(self, use_sigmoid=False, use_soft=False, reduction='mean', loss_weight=1.0, class_weight=None, pos_weight=None): super(CrossEntropyLoss, self).__init__() self.use_sigmoid = use_sigmoid self.use_soft = use_soft assert not (self.use_soft and self.use_sigmoid ), 'use_sigmoid and use_soft could not be set simultaneously' self.reduction = reduction self.loss_weight = loss_weight self.class_weight = class_weight self.pos_weight = pos_weight if self.use_sigmoid: self.cls_criterion = binary_cross_entropy elif self.use_soft: self.cls_criterion = soft_cross_entropy else: self.cls_criterion = cross_entropy def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) if self.class_weight is not None: class_weight = cls_score.new_tensor(self.class_weight) else: class_weight = None if self.pos_weight is not None and self.use_sigmoid: pos_weight = cls_score.new_tensor(self.pos_weight) kwargs.update({'pos_weight': pos_weight}) else: pos_weight = None loss_cls = self.loss_weight * self.cls_criterion(cls_score, label, weight, class_weight=class_weight, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp31 = 64.0 tmp32 = tmp30 / tmp31 tmp33 = 1.0 tmp34 = tmp32 * tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2, buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. 
reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def binary_cross_entropy(pred, label, weight=None, reduction='mean',
    avg_factor=None, class_weight=None, pos_weight=None):
    """Calculate the binary CrossEntropy loss with logits.

    Args:
        pred (torch.Tensor): The prediction with shape (N, \\*).
        label (torch.Tensor): The gt label with shape (N, \\*).
        weight (torch.Tensor, optional): Element-wise weight of loss with
            shape (N, ). Defaults to None.
        reduction (str): The method used to reduce the loss.
            Options are "none", "mean" and "sum". If reduction is 'none',
            the loss has the same shape as pred and label. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (torch.Tensor, optional): The weight for each class with
            shape (C), C is the number of classes. Default None.
        pos_weight (torch.Tensor, optional): The positive weight for each
            class with shape (C), C is the number of classes. Default None.

    Returns:
        torch.Tensor: The calculated loss
    """
    assert pred.dim() == label.dim()
    if class_weight is not None:
        N = pred.size()[0]
        class_weight = class_weight.repeat(N, 1)
    loss = F.binary_cross_entropy_with_logits(pred, label, weight=
        class_weight, pos_weight=pos_weight, reduction='none')
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=
    None, class_weight=None):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The gt label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (torch.Tensor, optional): The weight for each class with
            shape (C), C is the number of classes. Default None.

    Returns:
        torch.Tensor: The calculated loss
    """
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


def soft_cross_entropy(pred, label, weight=None, reduction='mean',
    class_weight=None, avg_factor=None):
    """Calculate the Soft CrossEntropy loss. The label can be float.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The gt label of the prediction with shape (N, C).
When using "mixup", the label can be float.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (torch.Tensor, optional): The weight for each class with
            shape (C), C is the number of classes. Default None.

    Returns:
        torch.Tensor: The calculated loss
    """
    loss = -label * F.log_softmax(pred, dim=-1)
    if class_weight is not None:
        loss *= class_weight
    loss = loss.sum(dim=-1)
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


class CrossEntropyLossNew(nn.Module):
    """Cross entropy loss.

    Args:
        use_sigmoid (bool): Whether the prediction uses sigmoid or softmax.
            Defaults to False.
        use_soft (bool): Whether to use the soft version of CrossEntropyLoss.
            Defaults to False.
        reduction (str): The method used to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to 'mean'.
        loss_weight (float): Weight of the loss. Defaults to 1.0.
        class_weight (List[float], optional): The weight for each class with
            shape (C), C is the number of classes. Default None.
        pos_weight (List[float], optional): The positive weight for each
            class with shape (C), C is the number of classes. Only enabled in
            BCE loss when ``use_sigmoid`` is True. Default None.
    """

    def __init__(self, use_sigmoid=False, use_soft=False, reduction='mean',
        loss_weight=1.0, class_weight=None, pos_weight=None):
        super(CrossEntropyLossNew, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.use_soft = use_soft
        assert not (self.use_soft and self.use_sigmoid
            ), 'use_sigmoid and use_soft cannot be set simultaneously'
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.pos_weight = pos_weight
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_soft:
            self.cls_criterion = soft_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
CAMP-eXplain-AI/imba-explain
CrossEntropyLoss
false
2,066
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
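A minimal CPU usage sketch for the record above (assuming the reference CrossEntropyLoss class and its helper functions are in scope; the shapes are illustrative, not from the record). It exercises the soft-label path, which is the branch the fused Triton kernel above implements:

import torch
import torch.nn.functional as F

pred = torch.randn(8, 5)                          # raw logits, shape (N, C)
label = torch.softmax(torch.randn(8, 5), dim=-1)  # soft (e.g. mixup) targets, shape (N, C)

criterion = CrossEntropyLoss(use_soft=True, reduction='mean')
loss = criterion(pred, label)

# The soft path reduces -sum(label * log_softmax(pred)) over classes, then averages.
manual = (-(label * F.log_softmax(pred, dim=-1)).sum(dim=-1)).mean()
assert torch.allclose(loss, manual)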
ConvBlockINEDense
import torch from torch import nn from torch.nn import init as init class ConvBlockINEDense(nn.Module): def __init__(self, n_ch, act='relu', ksize=3, norm='in', padding_mode= 'circular'): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.conv2 = nn.Conv2d(2 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.conv3 = nn.Conv2d(3 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.conv4 = nn.Conv2d(4 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.norm = norm if norm == 'in': self.norm1 = nn.InstanceNorm2d(n_ch, affine=True) self.norm2 = nn.InstanceNorm2d(n_ch, affine=True) self.norm3 = nn.InstanceNorm2d(n_ch, affine=True) def forward(self, x, g=None, b=None): x1 = self.conv1(x) x1 = self.act(x1) if self.norm == 'in': x1 = self.norm1(x1) x2 = torch.cat([x1, x], dim=1) x2 = self.conv2(x2) x2 = self.act(x2) if self.norm == 'in': x2 = self.norm2(x2) x3 = torch.cat([x2, x1, x], dim=1) x3 = self.conv3(x3) x3 = self.act(x3) if self.norm == 'in': x3 = self.norm3(x3) x4 = torch.cat([x3, x2, x1, x], dim=1) out = self.conv4(x4) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = 
tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_cat_convolution_repeat_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 x2 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0 % 4, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 16.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr4 + (r3 + 16 * x1 + 128 * x2), tmp31, xmask) tl.store(out_ptr5 + (r3 + 16 * x1 + 192 * x2), tmp31, xmask) tl.store(out_ptr6 + (r3 + 16 * x1 + 256 * x2), tmp31, xmask) tl.store(out_ptr7 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 128 * x1), tmp0, xmask) tl.store(out_ptr1 + (x0 + 192 * x1), tmp0, xmask) tl.store(out_ptr2 + (x0 + 256 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_copy_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_cat_convolution_repeat_6( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr4, out_ptr5, 
out_ptr6, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 x2 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0 % 4, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 16.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr4 + (r3 + 16 * x1 + 192 * x2), tmp31, xmask) tl.store(out_ptr5 + (r3 + 16 * x1 + 256 * x2), tmp31, xmask) tl.store(out_ptr6 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) @triton.jit def triton_poi_fused_copy_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = 
tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_cat_convolution_repeat_9( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 x2 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 16.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 16 * x1 + 256 * x2), tmp31, xmask) tl.store(out_ptr4 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) @triton.jit def triton_poi_fused_copy_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 
tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 
+ tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(576)](primals_3, buf0, buf1, 576, XBLOCK=256, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused_1[grid(576)](buf1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf3 = extern_kernels.convolution(buf2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((16,), (1,), torch.float32) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf13 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) buf11 = reinterpret_tensor(buf13, (4, 4, 4, 4), (128, 16, 4, 1), 0) buf28 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) buf26 = reinterpret_tensor(buf28, (4, 4, 4, 4), (192, 16, 4, 1), 64) buf43 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) buf41 = reinterpret_tensor(buf43, (4, 4, 4, 4), (256, 16, 4, 1), 128) buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_cat_convolution_repeat_2[grid (16)](buf4, primals_4, primals_2, primals_5, buf5, buf6, buf11, buf26, buf41, buf9, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 del primals_4 del primals_5 buf12 = reinterpret_tensor(buf13, (4, 4, 4, 4), (128, 16, 4, 1), 64) buf27 = reinterpret_tensor(buf28, (4, 4, 4, 4), (192, 16, 4, 1), 128) buf42 = reinterpret_tensor(buf43, (4, 4, 4, 4), (256, 16, 4, 1), 192) triton_poi_fused_cat_3[grid(256)](primals_3, buf12, buf27, buf42, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf14 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32 ) buf15 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32 ) triton_poi_fused_copy_4[grid(1152)](buf13, buf14, buf15, 1152, XBLOCK=256, num_warps=4, num_stages=1) del buf11 del buf12 del buf13 buf16 = buf14 del buf14 triton_poi_fused_5[grid(1152)](buf15, buf16, 1152, XBLOCK=256, num_warps=4, num_stages=1) del buf15 buf17 = extern_kernels.convolution(buf16, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 4, 4), (64, 16, 4, 1)) buf19 = empty_strided_cuda((16,), (1,), torch.float32) buf18 = buf17 del buf17 buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf25 = reinterpret_tensor(buf28, (4, 4, 4, 4), (192, 16, 4, 1), 0) buf40 = reinterpret_tensor(buf43, (4, 4, 4, 4), (256, 16, 4, 1), 64) buf23 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_cat_convolution_repeat_6[grid (16)](buf18, primals_8, primals_7, primals_9, buf19, buf20, buf25, buf40, buf23, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 del primals_8 del primals_9 buf29 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch. float32) buf30 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch. float32) triton_poi_fused_copy_7[grid(1728)](buf28, buf29, buf30, 1728, XBLOCK=128, num_warps=4, num_stages=1) del buf25 del buf26 del buf27 del buf28 buf31 = buf29 del buf29 triton_poi_fused_8[grid(1728)](buf30, buf31, 1728, XBLOCK=256, num_warps=4, num_stages=1) del buf30 buf32 = extern_kernels.convolution(buf31, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 4, 4, 4), (64, 16, 4, 1)) buf34 = empty_strided_cuda((16,), (1,), torch.float32) buf33 = buf32 del buf32 buf35 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf39 = reinterpret_tensor(buf43, (4, 4, 4, 4), (256, 16, 4, 1), 0) buf38 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_cat_convolution_repeat_9[grid (16)](buf33, primals_12, primals_11, primals_13, buf34, buf35, buf39, buf38, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_11 del primals_12 del primals_13 buf44 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch. float32) buf45 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch. 
float32) triton_poi_fused_copy_10[grid(2304)](buf43, buf44, buf45, 2304, XBLOCK=128, num_warps=4, num_stages=1) del buf39 del buf40 del buf41 del buf42 del buf43 buf46 = buf44 del buf44 triton_poi_fused_11[grid(2304)](buf45, buf46, 2304, XBLOCK=256, num_warps=4, num_stages=1) del buf45 buf47 = extern_kernels.convolution(buf46, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 4, 4, 4), (64, 16, 4, 1)) buf48 = buf47 del buf47 triton_poi_fused_convolution_12[grid(256)](buf48, primals_15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 return (buf48, primals_1, primals_6, primals_10, primals_14, buf2, buf4, buf5, reinterpret_tensor(buf9, (16,), (1,), 0), buf16, buf18, buf19, reinterpret_tensor(buf23, (16,), (1,), 0), buf31, buf33, buf34, reinterpret_tensor(buf38, (16,), (1,), 0), buf46, reinterpret_tensor(buf35, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf20, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ConvBlockINEDenseNew(nn.Module): def __init__(self, n_ch, act='relu', ksize=3, norm='in', padding_mode= 'circular'): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.conv2 = nn.Conv2d(2 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.conv3 = nn.Conv2d(3 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.conv4 = nn.Conv2d(4 * n_ch, n_ch, kernel_size=ksize, padding= padding, padding_mode=padding_mode) self.norm = norm if norm == 'in': self.norm1 = nn.InstanceNorm2d(n_ch, affine=True) self.norm2 = nn.InstanceNorm2d(n_ch, affine=True) self.norm3 = nn.InstanceNorm2d(n_ch, affine=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_4 = self.conv2.bias primals_10 = self.conv3.weight primals_5 = self.conv3.bias primals_14 = self.conv4.weight primals_7 = self.conv4.bias primals_8 = self.norm1.weight primals_9 = self.norm1.bias primals_11 = self.norm2.weight primals_12 = self.norm2.bias primals_13 = self.norm3.weight primals_15 = self.norm3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
BaekduChoi/Halftoning_v2
ConvBlockINEDense
false
2,067
[ "BSD-3-Clause" ]
0
fdb7040e1a4044f23ef9c92757bbb90c23685afe
https://github.com/BaekduChoi/Halftoning_v2/tree/fdb7040e1a4044f23ef9c92757bbb90c23685afe
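A quick shape check for the record above (assuming the reference ConvBlockINEDense class is in scope; the input size is illustrative). The densely connected convolutions keep the channel count, and the circular padding preserves the spatial size:

import torch

block = ConvBlockINEDense(n_ch=4, act='relu', norm='in', padding_mode='circular')
x = torch.rand(2, 4, 16, 16)      # (N, C, H, W)
out = block(x)
print(out.shape)                  # torch.Size([2, 4, 16, 16])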
GaussianFocalLoss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLoss(nn.Module):
    """
    GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please note that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
""" def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0 ): super(GaussianFocalLoss, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_reg = self.loss_weight * gaussian_focal_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction, avg_factor=avg_factor) return loss_reg def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1e-12
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = -tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp0
    tmp7 = tmp6 * tmp6
    tmp8 = tmp4 * tmp7
    tmp10 = tmp9 == tmp5
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp8 * tmp11
    tmp13 = tmp6 + tmp1
    tmp14 = tl_math.log(tmp13)
    tmp15 = -tmp14
    tmp16 = tmp0 * tmp0
    tmp17 = tmp15 * tmp16
    tmp18 = tmp5 - tmp9
    tmp19 = tmp18 * tmp18
    tmp20 = tmp19 * tmp19
    tmp21 = tmp17 * tmp20
    tmp22 = tmp12 + tmp21
    tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
    tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
    tmp26 = 256.0
    tmp27 = tmp25 / tmp26
    tmp28 = tmp27 * tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLossNew(nn.Module):
    """
    GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please note that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0
        ):
        super(GaussianFocalLossNew, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
CK-er/mmdet
GaussianFocalLoss
false
2,068
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
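A minimal CPU usage sketch for the record above (assuming the reference GaussianFocalLoss class is in scope; shapes are illustrative). Note that pred is expected to already be a probability map in (0, 1), since the loss takes its log directly:

import torch

loss_fn = GaussianFocalLoss(alpha=2.0, gamma=4.0, reduction='mean')
pred = torch.rand(2, 3, 8, 8)     # predicted heatmap, values in [0, 1)
target = torch.rand(2, 3, 8, 8)   # gaussian heatmap target, not a 0/1 mask
loss = loss_fn(pred, target)
print(loss.item())                # scalar mean over all heatmap cells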
MSELoss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def mse_loss(pred, target):
    return F.mse_loss(pred, target, reduction='none')


class MSELoss(nn.Module):

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None):
        loss = self.loss_weight * mse_loss(pred, target, weight, reduction=
            self.reduction, avg_factor=avg_factor)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
>>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def mse_loss(pred, target): return F.mse_loss(pred, target, reduction='none') class MSELossNew(nn.Module): def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CK-er/mmdet
MSELoss
false
2,069
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
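With default arguments the wrapped loss reduces to plain mean-squared error, which gives a cheap sanity check for the record above (assuming the reference MSELoss class is in scope; the shapes are illustrative):

import torch
import torch.nn.functional as F

criterion = MSELoss(reduction='mean', loss_weight=1.0)
pred, target = torch.rand(4, 4), torch.rand(4, 4)
# No weight and no avg_factor: identical to F.mse_loss with mean reduction.
assert torch.allclose(criterion(pred, target), F.mse_loss(pred, target))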
L1Loss
import torch import torch.nn as nn import torch.nn.functional as F class L1Loss(nn.Module): """L1Loss loss .""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K Args: output (torch.Tensor[N, K, 2]): Output regression. target (torch.Tensor[N, K, 2]): Target regression. target_weight (torch.Tensor[N, K, 2]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1LossNew(nn.Module): """L1Loss loss .""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
L1Loss
false
2,070
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
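A minimal usage sketch for the keypoint-regression case the record above documents (assuming the reference L1Loss class is in scope; shapes are illustrative):

import torch

criterion = L1Loss(use_target_weight=True, loss_weight=1.0)
output = torch.rand(2, 17, 2)           # (N, K, 2) regressed keypoints
target = torch.rand(2, 17, 2)
target_weight = torch.ones(2, 17, 2)    # 1 = valid joint, 0 = ignored
target_weight[:, 0] = 0.0               # e.g. mask out the first joint everywhere
loss = criterion(output, target, target_weight)
print(loss.item())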
SelfAttention
import torch import torch.nn as nn import torch.nn.functional as F class SelfAttention(nn.Module): def __init__(self, hidden_size, attention_size=100, n_attention_heads=1): super().__init__() self.hidden_size = hidden_size self.attention_size = attention_size self.n_attention_heads = n_attention_heads self.W1 = nn.Linear(hidden_size, attention_size, bias=False) self.W2 = nn.Linear(attention_size, n_attention_heads, bias=False) def forward(self, hidden): hidden = hidden.transpose(0, 1) x = torch.tanh(self.W1(hidden)) x = F.softmax(self.W2(x), dim=1) A = x.transpose(1, 2) M = A @ hidden return M, A def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (100, 4), (4, 1)) assert_size_stride(primals_3, (1, 100), (100, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 100), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 100), (400, 100, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(1600)](buf2, 1600, XBLOCK=256, num_warps=4, num_stages=1) 
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 100), (100, 1), 0), reinterpret_tensor(primals_3, (100, 1), (1, 100), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0) del buf3 triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out =buf6) return buf6, reinterpret_tensor(buf5, (4, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf2, buf5, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0 ), primals_3 class SelfAttentionNew(nn.Module): def __init__(self, hidden_size, attention_size=100, n_attention_heads=1): super().__init__() self.hidden_size = hidden_size self.attention_size = attention_size self.n_attention_heads = n_attention_heads self.W1 = nn.Linear(hidden_size, attention_size, bias=False) self.W2 = nn.Linear(attention_size, n_attention_heads, bias=False) def forward(self, input_0): primals_2 = self.W1.weight primals_3 = self.W2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
CS475-NLP/cs475-nlp-project
SelfAttention
false
2071
[ "MIT" ]
0
d73ec7d4b08abd3a5ba6445b99705fe8716a0151
https://github.com/CS475-NLP/cs475-nlp-project/tree/d73ec7d4b08abd3a5ba6445b99705fe8716a0151
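A note on consuming these paired records: the eager module and its generated `New` counterpart share the same parameters, so they can be compared directly on the tensors from get_inputs(). The sketch below is not part of the record; it assumes a CUDA device (the generated call() launches Triton kernels) and that the record's classes and helper functions are in scope. For records like this one the outputs are tuples; single-tensor records are wrapped for uniformity.

import torch

def check_record(eager_cls, compiled_cls, get_inputs, get_init_inputs, atol=1e-5):
    # Build both modules from the record's init spec and share one set of weights.
    args, kwargs = get_init_inputs()
    eager = eager_cls(*args, **kwargs).cuda().eval()
    compiled = compiled_cls(*args, **kwargs).cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        ref = eager(*inputs)      # (M, A) for this record
        out = compiled(*inputs)
    if not isinstance(ref, tuple):
        ref, out = (ref,), (out,)
    for r, o in zip(ref, out):
        assert torch.allclose(r, o, atol=atol)

# check_record(SelfAttention, SelfAttentionNew, get_inputs, get_init_inputs)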
GHMC
import torch import torch.nn as nn import torch.nn.functional as F def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero((labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights class GHMC(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper "Gradient Harmonized Single-stage Detector". https://arxiv.org/abs/1811.05181 Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMC, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-06 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, pred, target, label_weight, *args, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch_num, class_num]): The direct prediction of classification fc layer. target (float tensor of size [batch_num, class_num]): Binary class target for each sample. label_weight (float tensor of size [batch_num, class_num]): the value is 1 if the sample is valid and 0 if ignored. Returns: The gradient harmonized loss. """ if pred.dim() != target.dim(): target, label_weight = _expand_onehot_labels(target, label_weight, pred.size(-1)) target, label_weight = target.float(), label_weight.float() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt ) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp6, None) @triton.jit def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_abs_sigmoid_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.abs(tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_gt_sum_0[grid(1)](arg2_1, buf0, buf1, 1, 256, num_warps=2, num_stages=1) del arg2_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_zeros_like_1[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_abs_sigmoid_sub_2[grid(256)](arg0_1, arg1_1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf1, arg1_1, buf2, buf3, buf0 def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero((labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights class GHMCNew(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper "Gradient Harmonized Single-stage Detector". https://arxiv.org/abs/1811.05181 Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. 
""" def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMCNew, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-06 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
CK-er/mmdet
GHMC
false
2072
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
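Worth flagging when consuming this pair: in the generated call(), buf1 holds label_weight.gt(0).float().sum() and buf3 holds |sigmoid(pred) - target|, but the data-dependent binning loop from the eager forward never made it into the trace, and forward returns output[0], i.e. buf1. On my reading, GHMCNew therefore returns the valid-weight count rather than the GHM-C loss. A minimal eager reference for cross-checking, mirroring the record's own forward (momentum path omitted):

import torch
import torch.nn.functional as F

def ghmc_reference(pred, target, label_weight, bins=10):
    edges = (torch.arange(bins + 1).float() / bins).to(pred.device)
    edges[-1] += 1e-06
    weights = torch.zeros_like(pred)
    g = torch.abs(pred.sigmoid().detach() - target)
    valid = label_weight > 0
    tot = max(valid.float().sum().item(), 1.0)
    n = 0
    for i in range(bins):
        inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
        num_in_bin = inds.sum().item()
        if num_in_bin > 0:
            weights[inds] = tot / num_in_bin
            n += 1
    if n > 0:
        weights = weights / n
    return F.binary_cross_entropy_with_logits(pred, target, weights,
                                              reduction='sum') / tot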
BalancedL1Loss
import functools import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) b = np.e ** (gamma / alpha) - 1 loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log( b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b - alpha * beta) return loss class BalancedL1Loss(nn.Module): """Balanced L1 Loss arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) """ def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0): super(BalancedL1Loss, self).__init__() self.alpha = alpha self.gamma = gamma self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. 
reduction) loss_bbox = self.loss_weight * balanced_l1_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 19.085536923187664 tmp7 = tmp3 * tmp6 tmp8 = tmp7 + tmp4 tmp9 = 0.02619784824562798 tmp10 = tmp8 * tmp9 tmp11 = tmp7 * tmp4 tmp12 = tmp11 + tmp4 tmp13 = tl_math.log(tmp12) tmp14 = tmp10 * tmp13 tmp15 = 0.5 tmp16 = tmp3 * tmp15 tmp17 = tmp14 - tmp16 tmp18 = 1.5 tmp19 = tmp3 * tmp18 tmp20 = 0.07859354473688394 tmp21 = tmp19 + tmp20 tmp22 = tmp21 - tmp15 tmp23 = tl.where(tmp5, tmp17, tmp22) tmp24 = tl.broadcast_to(tmp23, [RBLOCK]) tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0)) tmp27 = 256.0 tmp28 = tmp26 / tmp27 tmp29 = tmp28 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. 
The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) b = np.e ** (gamma / alpha) - 1 loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log( b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b - alpha * beta) return loss class BalancedL1LossNew(nn.Module): """Balanced L1 Loss arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) """ def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0): super(BalancedL1LossNew, self).__init__() self.alpha = alpha self.gamma = gamma self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CK-er/mmdet
BalancedL1Loss
false
2073
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
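The float literals baked into the fused kernel are the default hyper-parameters of balanced_l1_loss, constant-folded at compile time (matching the kernel up to the last bit of rounding):

import math

alpha, gamma, beta = 0.5, 1.5, 1.0           # defaults from balanced_l1_loss
b = math.e ** (gamma / alpha) - 1            # 19.0855369... -> tmp6 in the kernel
print(alpha / b)                             # ~0.026197848245628 -> tmp9
print(gamma / b)                             # ~0.078593544736884 -> tmp20
print(alpha * beta)                          # 0.5 -> the constant subtracted in tmp22

The final multiplication by 1.0 in the kernel is the loss_weight default, likewise specialized in.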
GMP
import torch class GMP(torch.nn.Module): """A global max pooling module. Args: dim (int): The dimension at which to compute the maximum. """ def __init__(self, dim: 'int'): super().__init__() self.dim = dim def forward(self, x): return x.max(dim=self.dim)[0] def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GMPNew(torch.nn.Module): """A global max pooling module. Args: dim (int): The dimension at which to compute the maximum. """ def __init__(self, dim: 'int'): super().__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CLARITI-REPHRAIN/mumin-trawl
GMP
false
2074
[ "MIT" ]
0
8a7eda49d8740e927332cd3972750d0b54c23eb1
https://github.com/CLARITI-REPHRAIN/mumin-trawl/tree/8a7eda49d8740e927332cd3972750d0b54c23eb1
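Because the (4, 4, 4, 4, 4) test input is contiguous, dim=4 is the innermost (stride-1) axis, which is why the kernel can take a pairwise maximum over each consecutive group of four elements. An eager restatement of that layout argument:

import torch

x = torch.rand(4, 4, 4, 4, 4)
grouped = x.reshape(-1, 4).max(dim=1)[0].reshape(4, 4, 4, 4)
assert torch.equal(x.max(dim=4)[0], grouped)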
CombinedTargetMSELoss
import torch import torch.nn as nn class CombinedTargetMSELoss(nn.Module): """MSE loss for combined target. CombinedTarget: The combination of classification target (response map) and regression target (offset map). Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight, loss_weight=1.0): super().__init__() self.criterion = nn.MSELoss(reduction='mean') self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight): batch_size = output.size(0) num_channels = output.size(1) heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split( 1, 1) heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1 ) loss = 0.0 num_joints = num_channels // 3 for idx in range(num_joints): heatmap_pred = heatmaps_pred[idx * 3].squeeze() heatmap_gt = heatmaps_gt[idx * 3].squeeze() offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze() offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze() offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze() offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze() if self.use_target_weight: heatmap_pred = heatmap_pred * target_weight[:, idx] heatmap_gt = heatmap_gt * target_weight[:, idx] loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred, heatmap_gt * offset_x_gt) loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred, heatmap_gt * offset_y_gt) return loss / num_joints * self.loss_weight def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'use_target_weight': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp4 * tmp10 tmp13 = tmp4 * tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp20 = tmp4 * tmp19 tmp22 = tmp4 * tmp21 tmp23 = tmp20 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 4.0 tmp29 = tmp9 / tmp28 tmp30 = 0.5 tmp31 = tmp29 * tmp30 tmp32 = 0.0 tmp33 = tmp31 + tmp32 tmp34 = tmp18 / tmp28 tmp35 = tmp34 * tmp30 tmp36 = tmp33 + tmp35 tmp37 = tmp27 / tmp28 tmp38 = tmp37 * tmp30 tmp39 = tmp36 + tmp38 tmp40 = 1.0 tmp41 = tmp39 * tmp40 tmp42 = tmp41 * tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1, arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf3, class CombinedTargetMSELossNew(nn.Module): """MSE loss for combined target. CombinedTarget: The combination of classification target (response map) and regression target (offset map). Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight, loss_weight=1.0): super().__init__() self.criterion = nn.MSELoss(reduction='mean') self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ALISCIFP/mmpose
CombinedTargetMSELoss
false
2075
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
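For the 4-channel test inputs, num_joints = num_channels // 3 == 1, so only channels 0 (response map) and 1, 2 (x/y offsets) contribute and channel 3 is never read; the fused kernel unrolls exactly that single iteration (its loads at offsets 4*r0, 1 + 4*r0 and 2 + 4*r0). Note also that use_target_weight=4 in get_init_inputs is merely truthy, so the weighted branch is what gets traced. The channel indexing, restated:

num_channels = 4
num_joints = num_channels // 3               # == 1 for the test shapes
for idx in range(num_joints):
    heatmap, off_x, off_y = 3 * idx, 3 * idx + 1, 3 * idx + 2
    print(heatmap, off_x, off_y)             # 0 1 2 -- channel 3 is unused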
Embedding
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data class Embedding(nn.Module): def __init__(self, in_dim, out_dim): super(Embedding, self).__init__() self.linear = nn.Linear(in_dim, out_dim) self.tanh = nn.Tanh() def forward(self, x): x = self.linear(x) x = self.tanh(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class EmbeddingNew(nn.Module): def __init__(self, in_dim, out_dim): super(EmbeddingNew, self).__init__() self.linear = nn.Linear(in_dim, out_dim) self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CFM-MSG/Code_TFUN
Embedding
false
2076
[ "MIT" ]
0
39aebd748a0191e532eb81144386741e98a58e73
https://github.com/CFM-MSG/Code_TFUN/tree/39aebd748a0191e532eb81144386741e98a58e73
Norm
import torch import torch.nn as nn class Norm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.size = d_model self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim =-1, keepdim=True) + self.eps) + self.bias return norm def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class NormNew(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.size = d_model self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, input_0): primals_1 = self.alpha primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CS-savvy/Transformer-for-Parkinsons-disease
Norm
false
2077
[ "MIT" ]
0
42ef54071092f4aab74c8b9ec82c52e944806a5b
https://github.com/CS-savvy/Transformer-for-Parkinsons-disease/tree/42ef54071092f4aab74c8b9ec82c52e944806a5b
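The fused kernel divides the summed squared deviations by 3.0, i.e. d_model - 1: torch.Tensor.std defaults to the unbiased estimate, so this Norm is not bit-identical to nn.LayerNorm (which uses the biased variance). Restating the kernel's std computation in eager form:

import torch

x = torch.rand(4, 4, 4, 4)
centered = x - x.mean(dim=-1, keepdim=True)
manual_std = (centered.pow(2).sum(-1, keepdim=True) / 3.0).sqrt()   # n - 1 == 3
assert torch.allclose(x.std(dim=-1, keepdim=True), manual_std)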
MemoryEfficientSwish
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class SwishImplementation(torch.autograd.Function): @staticmethod def forward(ctx, i): result = i * torch.sigmoid(i) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): i = ctx.saved_variables[0] sigmoid_i = torch.sigmoid(i) return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) class MemoryEfficientSwish(nn.Module): def forward(self, x): return SwishImplementation.apply(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SwishImplementation(torch.autograd.Function): @staticmethod def forward(ctx, i): result = i * torch.sigmoid(i) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): i = ctx.saved_variables[0] sigmoid_i = torch.sigmoid(i) return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) class MemoryEfficientSwishNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BradleyBrown19/CustomObjectDetector
MemoryEfficientSwish
false
2078
[ "Apache-2.0" ]
0
11c14ec6127c553ac365703c768b75dde33d9a4d
https://github.com/BradleyBrown19/CustomObjectDetector/tree/11c14ec6127c553ac365703c768b75dde33d9a4d
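The hand-written backward encodes d/dx[x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x))), which is the same derivative autograd reaches for the plain expression; a quick numerical check:

import torch

x = torch.randn(8, dtype=torch.double, requires_grad=True)
y = x * torch.sigmoid(x)
y.sum().backward()
s = torch.sigmoid(x.detach())
assert torch.allclose(x.grad, s * (1 + x.detach() * (1 - s)))

(The record's backward reads ctx.saved_variables, the long-deprecated alias of ctx.saved_tensors; both return the tensors stashed by save_for_backward.)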
NormImageUint8ToFloat
from torch.nn import Module import torch class NormImageUint8ToFloat(Module): def forward(self, im): return 2.0 * (im / 255.0 - 0.5) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tmp3 = 0.5 tmp4 = tmp2 - tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormImageUint8ToFloatNew(Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CeadeS/PyTorchH5Dataset
NormImageUint8ToFloat
false
2079
[ "BSD-3-Clause" ]
0
9ee6e49f2a780345abd708abf2e0c47bb5475e0a
https://github.com/CeadeS/PyTorchH5Dataset/tree/9ee6e49f2a780345abd708abf2e0c47bb5475e0a
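The kernel replaces the division by 255.0 with a multiply by its reciprocal (tmp1 = 0.00392156862745098 == 1/255), and the composite op maps the uint8 value range onto [-1, 1]:

import torch

im = torch.tensor([0.0, 127.5, 255.0])
print(2.0 * (im / 255.0 - 0.5))              # tensor([-1., 0., 1.])
print(1 / 255)                               # 0.00392156862745098, the kernel's tmp1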
KLDLoss
import torch from torch import nn class KLDLoss(nn.Module): def __init__(self, reduction='sum'): super(KLDLoss, self).__init__() self.reduction = reduction def forward(self, mean, logvar): kld_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(), 1) if self.reduction == 'mean': kld_loss = torch.mean(kld_loss) elif self.reduction == 'sum': kld_loss = torch.sum(kld_loss) return kld_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp3 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp18 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp9 = tmp8 + tmp1 tmp11 = tmp10 * tmp10 tmp12 = tmp9 - tmp11 tmp13 = tl_math.exp(tmp8) tmp14 = tmp12 - tmp13 tmp15 = tmp7 + tmp14 tmp17 = tmp16 + tmp1 tmp19 = tmp18 * tmp18 tmp20 = tmp17 - tmp19 tmp21 = tl_math.exp(tmp16) tmp22 = tmp20 - tmp21 tmp23 = tmp15 + tmp22 tmp25 = tmp24 + tmp1 tmp27 = tmp26 * tmp26 tmp28 = tmp25 - tmp27 tmp29 = tl_math.exp(tmp24) tmp30 = tmp28 - tmp29 tmp31 = tmp23 + tmp30 tmp32 = -0.5 tmp33 = tmp31 * tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](arg0_1, arg1_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class KLDLossNew(nn.Module): def __init__(self, reduction='sum'): super(KLDLossNew, self).__init__() self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Cc618/Feature-Changer
KLDLoss
false
2080
[ "MIT" ]
0
7ab82f525c4b5142afec1819732b0fb5f3983152
https://github.com/Cc618/Feature-Changer/tree/7ab82f525c4b5142afec1819732b0fb5f3983152
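The expression here is the closed-form KL divergence between N(mean, exp(logvar)) and the standard normal, summed over dim 1 (the usual VAE regularizer). A sanity check against torch.distributions, assuming nothing beyond stock PyTorch:

import torch
from torch.distributions import Normal, kl_divergence

mean, logvar = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
closed = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(), 1)
ref = kl_divergence(Normal(mean, (0.5 * logvar).exp()), Normal(0.0, 1.0)).sum(1)
assert torch.allclose(closed, ref, atol=1e-5)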
Normalize
import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): def __init__(self, power=2): super(NormalizeNew, self).__init__() self.power = power def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Alice1820/CMC
Normalize
false
2081
[ "BSD-2-Clause" ]
0
4f4354b3a33ec9c0784baefd7d1d9798e191ead5
https://github.com/Alice1820/CMC/tree/4f4354b3a33ec9c0784baefd7d1d9798e191ead5
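With the default power=2, the compiler specializes pow into explicit squares and a final libdevice.sqrt; the op is plain L2 normalization over dim 1:

import torch

x = torch.rand(4, 4, 4, 4)
out = x.div(x.pow(2).sum(1, keepdim=True).pow(0.5))
assert torch.allclose(out, x / x.norm(dim=1, keepdim=True))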
Policy
import torch import torch.nn as nn import torch.nn.functional as F from functools import * class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) self.dropout = nn.Dropout(p=0.6) self.affine2 = nn.Linear(128, 2) self.saved_log_probs = [] self.rewards = [] def forward(self, x): x = self.affine1(x) x = self.dropout(x) x = F.relu(x) action_scores = self.affine2(x) return F.softmax(action_scores, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from functools import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 128), (128, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_4, buf5 class PolicyNew(nn.Module): def __init__(self): super(PolicyNew, self).__init__() self.affine1 = nn.Linear(4, 128) self.dropout = nn.Dropout(p=0.6) self.affine2 = nn.Linear(128, 2) self.saved_log_probs = [] self.rewards = [] def forward(self, input_0): primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AJSVB/GPBT
Policy
false
2082
[ "MIT" ]
0
746c11d06ecc4c3b62fc0a3290d672d336cbb11e
https://github.com/AJSVB/GPBT/tree/746c11d06ecc4c3b62fc0a3290d672d336cbb11e
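One thing the generated call() makes visible: the Dropout(p=0.6) module has no counterpart in the trace (the kernels go straight from the first matmul into ReLU), so the compiled PolicyNew corresponds to the eager Policy with dropout inactive, i.e. eval mode. Note also that the softmax runs over dim=1 of the 4-D test input, not over the final action dimension. Both are easy to see eagerly, with Policy as defined in this record:

import torch

policy = Policy().eval()                     # dropout becomes the identity
x = torch.rand(4, 4, 4, 4)
with torch.no_grad():
    probs = policy(x)
print(probs.shape)                           # torch.Size([4, 4, 4, 2])
print(probs.sum(dim=1))                      # all ones: normalized over dim 1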
Conv2d
import torch import torch.nn as nn from torch.nn import functional as F class Conv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True ).mean(dim=3, keepdim=True) weight = weight - weight_mean std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1 ) + 1e-05 weight = weight / std.expand_as(weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x2, tmp36, xmask) @triton.jit def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tl.where(xmask, tmp11, 0) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tl.full([XBLOCK, 1], 64, tl.int32) tmp19 = tmp18.to(tl.float32) tmp20 = tmp17 / tmp19 tmp21 = tmp11 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.sum(tmp25, 1)[:, None] tmp27 = 63.0 tmp28 = tmp26 / tmp27 tmp29 = libdevice.sqrt(tmp28) tmp30 = 1e-05 tmp31 = tmp29 + tmp30 tmp32 = tmp10 / tmp31 tl.store(out_ptr0 + (r1 + 64 * x0), tmp10, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp29, 
xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp32, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf5 = buf3 del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_div_mean_std_sub_1[grid(4)](buf5, primals_1, buf0, buf1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(16)](buf8, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf8, primals_1, primals_3, buf5, buf6 class Conv2dNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CarlosFora/DeepLabv3.pytorch
Conv2d
false
2083
[ "BSD-3-Clause" ]
0
f590f8f93c0c2e72b71f60c78450d92f93db2511
https://github.com/CarlosFora/DeepLabv3.pytorch/tree/f590f8f93c0c2e72b71f60c78450d92f93db2511
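This Conv2d variant is weight standardization: each output channel's weight is centered and divided by its unbiased standard deviation before the convolution; the kernel's division by 63.0 is the n - 1 term for the 64 weights per output channel. The standardized weight in eager form (the chained per-dim means in the record equal one mean over dims 1-3):

import torch

w = torch.rand(4, 4, 4, 4)                   # (out_channels, in_channels, kH, kW)
centered = w - w.mean(dim=(1, 2, 3), keepdim=True)
std = centered.view(w.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-05
w_hat = centered / std                       # what actually gets convolved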
GlobalAvgPool2d
import torch import torch.nn as nn class GlobalAvgPool2d(nn.Module): def __init__(self): """Global average pooling over the input's spatial dimensions""" super(GlobalAvgPool2d, self).__init__() def forward(self, inputs): in_size = inputs.size() return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class GlobalAvgPool2dNew(nn.Module): def __init__(self): """Global average pooling over the input's spatial dimensions""" super(GlobalAvgPool2dNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ChenyangWang1/face_parsing
GlobalAvgPool2d
false
2084
[ "MIT" ]
0
506e74eb8a2094920c03f2fe0774656b1043e8a6
https://github.com/ChenyangWang1/face_parsing/tree/506e74eb8a2094920c03f2fe0774656b1043e8a6
CLOSS
import torch import torch.nn as nn import torch.nn.functional as F class CLOSS(nn.Module): def __init__(self, m=1.0): super().__init__() self.m = m def forward(self, pp_pair, pn_pair): basic_loss = F.sigmoid(pp_pair) - F.sigmoid(pn_pair) + self.m loss = torch.max(torch.zeros_like(basic_loss), basic_loss).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_maximum_mean_sigmoid_sub_zeros_like_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 - tmp3 tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 256.0 tmp13 = tmp11 / tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_maximum_mean_sigmoid_sub_zeros_like_0[grid(1)]( buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class CLOSSNew(nn.Module): def __init__(self, m=1.0): super().__init__() self.m = m def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CharonWangg/Turtle_Soup_Generator
CLOSS
false
2085
[ "MIT" ]
0
18ab621f8a8e3998b7fcf8c8eb678af7335abf87
https://github.com/CharonWangg/Turtle_Soup_Generator/tree/18ab621f8a8e3998b7fcf8c8eb678af7335abf87
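The loss above is mean(max(0, sigmoid(pp) - sigmoid(pn) + m)), so it reaches zero only when sigmoid(pn_pair) exceeds sigmoid(pp_pair) by at least the margin. A small illustration, assuming the eager CLOSS class above is in scope (CPU is fine here):

import torch

# Hedged sketch: saturate both inputs so the sigmoid gap hits its extremes.
loss_fn = CLOSS(m=1.0)
pp = torch.full((2, 2), -10.0)   # sigmoid(pp) ~ 0
pn = torch.full((2, 2), 10.0)    # sigmoid(pn) ~ 1
print(loss_fn(pp, pn))           # ~0.0: the gap covers the full margin
print(loss_fn(pn, pp))           # ~2.0: maximal violation, gap +1 plus margin 1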
SigmoidFocalClassificationLoss
import torch import torch.nn as nn class SigmoidFocalClassificationLoss(nn.Module): """ Sigmoid focal cross entropy loss. """ def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25): """ Args: gamma: Weighting parameter to balance loss for hard and easy examples. alpha: Weighting parameter to balance loss for positive and negative examples. """ super(SigmoidFocalClassificationLoss, self).__init__() self.alpha = alpha self.gamma = gamma @staticmethod def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target: 'torch.Tensor'): """ PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits: max(x, 0) - x * z + log(1 + exp(-abs(x))) in https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits Args: input: (B, #anchors, #classes) float tensor. Predicted logits for each class target: (B, #anchors, #classes) float tensor. One-hot encoded classification targets Returns: loss: (B, #anchors, #classes) float tensor. Sigmoid cross entropy loss without reduction """ loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch .exp(-torch.abs(input))) return loss def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', weights: 'torch.Tensor'): """ Args: input: (B, #anchors, #classes) float tensor. Predicted logits for each class target: (B, #anchors, #classes) float tensor. One-hot encoded classification targets weights: (B, #anchors) float tensor. Anchor-wise weights. Returns: weighted_loss: (B, #anchors, #classes) float tensor after weighting. """ pred_sigmoid = torch.sigmoid(input) alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha) pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid focal_weight = alpha_weight * torch.pow(pt, self.gamma) bce_loss = self.sigmoid_cross_entropy_with_logits(input, target) loss = focal_weight * bce_loss if weights.shape.__len__() == 2 or weights.shape.__len__( ) == 1 and target.shape.__len__() == 2: weights = weights.unsqueeze(-1) assert weights.shape.__len__() == loss.shape.__len__() return loss * weights def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0( in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp26 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = 0.75 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tl.sigmoid(tmp8) tmp10 = tmp3 - tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp4 * tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp7 * tmp14 tmp16 = 0.0 tmp17 = triton_helpers.maximum(tmp8, tmp16) tmp18 = tmp8 * tmp0 tmp19 = tmp17 - tmp18 tmp20 = tl_math.abs(tmp8) tmp21 = -tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = libdevice.log1p(tmp22) tmp24 = tmp19 + tmp23 tmp25 = tmp15 * tmp24 tmp27 = tmp25 * tmp26 tl.store(out_ptr0 + x0, tmp27, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0[ grid(256)](arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class SigmoidFocalClassificationLossNew(nn.Module): """ Sigmoid focal cross entropy loss. """ def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25): """ Args: gamma: Weighting parameter to balance loss for hard and easy examples. alpha: Weighting parameter to balance loss for positive and negative examples. """ super(SigmoidFocalClassificationLossNew, self).__init__() self.alpha = alpha self.gamma = gamma @staticmethod def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target: 'torch.Tensor'): """ PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits: max(x, 0) - x * z + log(1 + exp(-abs(x))) in https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits Args: input: (B, #anchors, #classes) float tensor. Predicted logits for each class target: (B, #anchors, #classes) float tensor. One-hot encoded classification targets Returns: loss: (B, #anchors, #classes) float tensor. Sigmoid cross entropy loss without reduction """ loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch .exp(-torch.abs(input))) return loss def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
CSL-KU/OpenPCDet
SigmoidFocalClassificationLoss
false
2086
[ "Apache-2.0" ]
0
2c5fca0da1521add4b40e6cdfe75d02d4285b83f
https://github.com/CSL-KU/OpenPCDet/tree/2c5fca0da1521add4b40e6cdfe75d02d4285b83f
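The static helper in this record is the standard numerically stable form of sigmoid cross-entropy, max(x, 0) - x*z + log1p(exp(-|x|)), which is the same quantity PyTorch's builtin computes. A quick identity check, assuming the eager SigmoidFocalClassificationLoss class above is in scope:

import torch
import torch.nn.functional as F

# Hedged sketch: the hand-rolled stable BCE should match the builtin
# binary_cross_entropy_with_logits with reduction='none'.
x = torch.randn(4, 8, 3)
z = torch.randint(0, 2, (4, 8, 3)).float()
ours = SigmoidFocalClassificationLoss.sigmoid_cross_entropy_with_logits(x, z)
ref = F.binary_cross_entropy_with_logits(x, z, reduction='none')
assert torch.allclose(ours, ref, atol=1e-6)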
ExampleBackbone
import torch import torch.nn as nn class ExampleBackbone(nn.Module): def __init__(self): super(ExampleBackbone, self).__init__() self.conv = nn.Conv2d(3, 3, 3) def init_weights(self, pretrained=None): pass def forward(self, x): return [self.conv(x)] def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(46128)](buf1, primals_2, 46128, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class ExampleBackboneNew(nn.Module): def __init__(self): super(ExampleBackboneNew, self).__init__() self.conv = nn.Conv2d(3, 3, 3) def init_weights(self, pretrained=None): pass def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ChenDirk/mmrazor
ExampleBackbone
false
2087
[ "Apache-2.0" ]
0
6f262ecd777c15efd4ee2d191cdc567071615421
https://github.com/ChenDirk/mmrazor/tree/6f262ecd777c15efd4ee2d191cdc567071615421
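The hard-coded xnumel = 46128 in the bias-add kernel above is simply the element count of the convolution output: a 3x3 valid convolution shrinks 64 to 62 per side, and 4 * 3 * 62 * 62 = 46128. A short check in eager PyTorch:

import torch
import torch.nn as nn

# Hedged sketch: reproduce the shape arithmetic baked into the kernel.
y = nn.Conv2d(3, 3, 3)(torch.rand(4, 3, 64, 64))
assert y.shape == (4, 3, 62, 62)
assert y.numel() == 46128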
KLDivergence
import torch import torch.nn as nn import torch.nn.functional as F class KLDivergence(nn.Module): """A measure of how one probability distribution Q is different from a second, reference probability distribution P. Args: tau (float): Temperature coefficient. Defaults to 1.0. reduction (str): Specifies the reduction to apply to the loss: ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. ``'none'``: no reduction will be applied, ``'batchmean'``: the sum of the output will be divided by the batchsize, ``'sum'``: the output will be summed, ``'mean'``: the output will be divided by the number of elements in the output. Default: ``'batchmean'`` loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, tau=1.0, reduction='batchmean', loss_weight=1.0): super(KLDivergence, self).__init__() self.tau = tau self.loss_weight = loss_weight accept_reduction = {'none', 'batchmean', 'sum', 'mean'} assert reduction in accept_reduction, f'KLDivergence supports reduction {accept_reduction}, but gets {reduction}.' self.reduction = reduction def forward(self, preds_S, preds_T): """Forward computation. Args: preds_S (torch.Tensor): The student model prediction with shape (N, C, H, W) or shape (N, C). preds_T (torch.Tensor): The teacher model prediction with shape (N, C, H, W) or shape (N, C). Return: torch.Tensor: The calculated loss value. """ preds_T = preds_T.detach() softmax_pred_T = F.softmax(preds_T / self.tau, dim=1) logsoftmax_preds_S = F.log_softmax(preds_S / self.tau, dim=1) loss = self.tau ** 2 * F.kl_div(logsoftmax_preds_S, softmax_pred_T, reduction=self.reduction) return self.loss_weight * loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 
tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tmp40 = tmp39 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp40, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg1_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class KLDivergenceNew(nn.Module): """A measure of how one probability distribution Q is different from a second, reference probability distribution P. Args: tau (float): Temperature coefficient. Defaults to 1.0. reduction (str): Specifies the reduction to apply to the loss: ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. ``'none'``: no reduction will be applied, ``'batchmean'``: the sum of the output will be divided by the batchsize, ``'sum'``: the output will be summed, ``'mean'``: the output will be divided by the number of elements in the output. Default: ``'batchmean'`` loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, tau=1.0, reduction='batchmean', loss_weight=1.0): super(KLDivergenceNew, self).__init__() self.tau = tau self.loss_weight = loss_weight accept_reduction = {'none', 'batchmean', 'sum', 'mean'} assert reduction in accept_reduction, f'KLDivergence supports reduction {accept_reduction}, but gets {reduction}.' self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ChenDirk/mmrazor
KLDivergence
false
2088
[ "Apache-2.0" ]
0
6f262ecd777c15efd4ee2d191cdc567071615421
https://github.com/ChenDirk/mmrazor/tree/6f262ecd777c15efd4ee2d191cdc567071615421
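Inside the reduction kernel above, tmp36 = 0.25 is 1 / batch_size for reduction='batchmean' with the batch of 4 from get_inputs, and the two trailing multiplications by 1.0 are tau**2 and loss_weight. The eager formula it fuses, assuming the KLDivergence class above is in scope:

import torch
import torch.nn.functional as F

# Hedged sketch: with tau=1 and loss_weight=1 the module reduces to plain
# batchmean KL between teacher softmax and student log-softmax.
kl = KLDivergence(tau=1.0)
s, t = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
ref = F.kl_div(F.log_softmax(s, dim=1), F.softmax(t, dim=1),
               reduction='batchmean')
assert torch.allclose(kl(s, t), ref, atol=1e-6)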
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torch.multiprocessing class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_hid) def forward(self, x): output = self.w_1(x.transpose(1, 2)).transpose(1, 2) output = F.relu(self.layer_norm(output)) output = self.w_2(output.transpose(1, 2)).transpose(1, 2) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data import torch.multiprocessing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_convolution_native_layer_norm_relu_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y1 = yindex // 4 y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 
= tl.full([1, 1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask) tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp10, xmask & ymask) tl.store(out_ptr2 + (x2 + 4 * y3), tmp10, xmask & ymask) @triton.jit def triton_poi_fused_threshold_backward_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_2[grid(16)](buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_native_layer_norm_relu_transpose_3[grid (16, 4)](buf2, buf3, buf4, primals_4, primals_5, buf5, buf6, buf7, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf3 del buf4 del primals_5 buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4), (16, 4, 1)) del buf7 buf9 = buf8 del buf8 triton_poi_fused_convolution_1[grid(64)](buf9, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_threshold_backward_4[grid(16, 4)](buf5, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf5 return reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0 ), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, ( 4, 4, 4), (16, 1, 4), 0), buf2, buf6, buf10 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_hid) 
def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_6 = self.w_2.weight primals_4 = self.w_2.bias primals_5 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Caiyuan-Zheng/Consistency_Regularization_STR
PositionwiseFeedForward
false
2089
[ "MIT" ]
0
7c7ce69390c429974cb2d1969b0d9d6707e6723f
https://github.com/Caiyuan-Zheng/Consistency_Regularization_STR/tree/7c7ce69390c429974cb2d1969b0d9d6707e6723f
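Because nn.Conv1d expects (batch, channels, length), the module transposes around each 1x1 convolution, so a (B, L, d_in) input comes back with the same shape; note also that the dropout argument is accepted but never applied in this variant. A shape check, assuming the eager PositionwiseFeedForward class above is in scope:

import torch

# Hedged sketch: the two transposed 1x1 convs preserve the (B, L, d_in)
# layout even when the hidden width differs from the model width.
ffn = PositionwiseFeedForward(d_in=4, d_hid=8)
x = torch.rand(4, 4, 4)   # (batch, length, d_in)
assert ffn(x).shape == x.shape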
ConvWS2d
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05): c_in = weight.size(0) weight_flat = weight.view(c_in, -1) mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) weight = (weight - mean) / (std + eps) return F.conv2d(input, weight, bias, stride, padding, dilation, groups) class ConvWS2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, eps=1e-05): super(ConvWS2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, x): return conv_ws_2d(x, self.weight, self.bias, self.stride, self. padding, self.dilation, self.groups, self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tmp0 - tmp20 tmp25 = 1e-05 tmp26 = tmp23 + tmp25 tmp27 = tmp24 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp23, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp27, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0) del buf0 buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_std_sub_0[grid(4)](buf1, buf5, primals_1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_1[grid(16)](buf8, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf8, primals_1, primals_3, buf1, buf5, buf6 def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, 
eps=1e-05): c_in = weight.size(0) weight_flat = weight.view(c_in, -1) mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) weight = (weight - mean) / (std + eps) return F.conv2d(input, weight, bias, stride, padding, dilation, groups) class ConvWS2dNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, eps=1e-05): super(ConvWS2dNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
BradleyBrown19/CustomObjectDetector
ConvWS2d
false
2090
[ "Apache-2.0" ]
0
11c14ec6127c553ac365703c768b75dde33d9a4d
https://github.com/BradleyBrown19/CustomObjectDetector/tree/11c14ec6127c553ac365703c768b75dde33d9a4d
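Weight standardization, as implemented by conv_ws_2d above, shifts and rescales each output filter to roughly zero mean and unit standard deviation before handing it to F.conv2d; the fused Triton kernel reproduces exactly that (the division by 63.0 is the Bessel correction matching torch.std's unbiased default). A quick check of the standardization step alone:

import torch

# Hedged sketch: per-filter statistics after the (w - mean) / (std + eps) step.
w = torch.randn(4, 4, 4, 4)
flat = w.view(4, -1)
mean = flat.mean(dim=1, keepdim=True)
std = flat.std(dim=1, keepdim=True)
w_std = (flat - mean) / (std + 1e-05)
assert torch.allclose(w_std.mean(dim=1), torch.zeros(4), atol=1e-6)
assert torch.allclose(w_std.std(dim=1), torch.ones(4), atol=1e-3)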