Dataset columns (types and value ranges as shown in the dataset viewer):

entry_point                    stringlengths   1 – 65
original_triton_python_code    stringlengths   208 – 619k
optimised_triton_code          stringlengths   1.15k – 275k
repo_name                      stringlengths   7 – 115
module_name                    stringlengths   1 – 65
synthetic                      bool            1 class
uuid                           int64           0 – 18.5k
licenses                       listlengths     1 – 6
stars                          int64           0 – 19.8k
sha                            stringlengths   40 – 40
repo_link                      stringlengths   72 – 180
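Each row pairs one PyTorch module (plus its get_inputs()/get_init_inputs() helpers) with the Inductor-generated Triton lowering of that module and the source repository metadata. A minimal sketch of how such rows could be read with the Hugging Face datasets library; the dataset id and the "train" split used below are placeholders (assumptions), not the actual repository path.

from datasets import load_dataset

# Placeholder dataset id and split (assumptions) -- substitute the real values.
ds = load_dataset("org-name/pytorch-triton-pairs", split="train")

row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["repo_link"])

# The two largest columns hold complete Python sources as single strings.
reference_src = row["original_triton_python_code"]   # nn.Module + get_inputs()/get_init_inputs()
optimised_src = row["optimised_triton_code"]          # Triton kernels + call() + "*New" module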
ResidualBlock
import torch import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_planes, planes, norm_layer=nn.InstanceNorm2d, stride=1, dilation=1): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, dilation= dilation, padding=dilation, stride=stride, bias=False) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, dilation= dilation, padding=dilation, bias=False) self.relu = nn.ReLU(inplace=True) self.norm1 = norm_layer(planes) self.norm2 = norm_layer(planes) if not stride == 1 or in_planes != planes: self.norm3 = norm_layer(planes) if stride == 1 and in_planes == planes: self.downsample = None else: self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) def forward(self, x): y = x y = self.relu(self.norm1(self.conv1(y))) y = self.relu(self.norm2(self.conv2(y))) if self.downsample is not None: x = self.downsample(x) return self.relu(x + y) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_1( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp23 = tmp0 - tmp10 tmp24 = tmp23 * tmp21 tmp25 = tl.full([1, 1], 0, tl.int32) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp27 = tmp22 + tmp26 tmp28 = triton_helpers.maximum(tmp25, tmp27) tmp29 = 0.0 tmp30 = tmp28 <= tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp28, xmask) tl.store(out_ptr2 + (r1 + 16 * x0), tmp30, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), 
(36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf0, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf10 = reinterpret_tensor(buf8, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf8 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_1[ grid(16)](buf10, buf6, primals_1, buf7, buf11, buf12, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) return buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor( buf4, (16,), (1,), 0 ), buf5, buf6, buf7, buf10, buf12, reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) class ResidualBlockNew(nn.Module): def __init__(self, in_planes, planes, norm_layer=nn.InstanceNorm2d, stride=1, dilation=1): super(ResidualBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, dilation= dilation, padding=dilation, stride=stride, bias=False) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, dilation= dilation, padding=dilation, bias=False) self.relu = nn.ReLU(inplace=True) self.norm1 = norm_layer(planes) self.norm2 = norm_layer(planes) if not stride == 1 or in_planes != planes: self.norm3 = norm_layer(planes) if stride == 1 and in_planes == planes: self.downsample = None else: self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
haofeixu/gmflow
ResidualBlock
false
15,489
[ "Apache-2.0" ]
58
d304e5e516c11df378d63808d6679aea43bc564a
https://github.com/haofeixu/gmflow/tree/d304e5e516c11df378d63808d6679aea43bc564a
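In every row the optimised module (here ResidualBlockNew) keeps the same constructor and forward interface as the reference module, while get_init_inputs() and get_inputs() supply constructor arguments and sample inputs. A minimal sketch of how one might check that the two implementations of this row agree, assuming the class and helper definitions above have been executed in the current session and a CUDA device is available; the weight copy and tolerances are illustrative choices, not part of the dataset.

import torch

# Build both modules with the row's constructor arguments and share weights.
args, kwargs = get_init_inputs()
ref = ResidualBlock(*args, **kwargs).cuda().eval()
opt = ResidualBlockNew(*args, **kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())  # both only own conv1.weight / conv2.weight here

# Run the row's sample inputs through both versions and compare.
x = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    y_ref = ref(*x)
    y_opt = opt(*x)
torch.testing.assert_close(y_ref, y_opt, rtol=1e-4, atol=1e-4)  # illustrative tolerances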
ConvReLUNorm
import torch import torch.cuda import torch.distributed import torch.utils.data import torch.optim class ConvReLUNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNorm, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = torch.nn.LayerNorm(out_channels) self.dropout = torch.nn.Dropout(dropout) def forward(self, signal): out = torch.nn.functional.relu(self.conv(signal)) out = self.norm(out.transpose(1, 2)).transpose(1, 2) return self.dropout(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.cuda import torch.distributed import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp13 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp7 - tmp13 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 - tmp13 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp24 / tmp12 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tl.store(out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(16, 4)](buf1, buf2, buf3, primals_4, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf2 del buf3 del primals_5 return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0 ), primals_1, primals_3, primals_4, buf1 class ConvReLUNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNormNew, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = torch.nn.LayerNorm(out_channels) self.dropout = torch.nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
hamjam/NeMo
ConvReLUNorm
false
15,490
[ "Apache-2.0" ]
4,145
b3484d32e1317666151f931bfa39867d88ed8658
https://github.com/hamjam/NeMo/tree/b3484d32e1317666151f931bfa39867d88ed8658
PermEqui1_mean
import torch import torch.nn as nn class PermEqui1_mean(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui1_mean, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) def forward(self, x): xm = x.mean(1, keepdim=True) x = self.Gamma(x - xm) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class PermEqui1_meanNew(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui1_meanNew, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) def forward(self, input_0): primals_2 = self.Gamma.weight primals_3 = self.Gamma.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
haoruilee/DeepSets
PermEqui1_mean
false
15,491
[ "Apache-2.0" ]
213
b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
https://github.com/haoruilee/DeepSets/tree/b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
PermEqui2_max
import torch import torch.nn as nn class PermEqui2_max(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui2_max, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) def forward(self, x): xm, _ = x.max(1, keepdim=True) xm = self.Lambda(xm) x = self.Gamma(x) x = x - xm return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = xindex // 64 x5 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del buf0 del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sub_1[grid(256)](buf3, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 return buf3, primals_1 class PermEqui2_maxNew(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui2_maxNew, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) def forward(self, input_0): primals_2 = self.Gamma.weight primals_4 = self.Gamma.bias primals_3 = self.Lambda.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
haoruilee/DeepSets
PermEqui2_max
false
15,492
[ "Apache-2.0" ]
213
b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
https://github.com/haoruilee/DeepSets/tree/b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
AttentionSelf
import torch class AttentionSelf(torch.nn.Module): def __init__(self, input_size, hidden_size, device=torch.device('cpu')): """ implementation of self-attention. """ super().__init__() self.ff1 = torch.nn.Linear(input_size, hidden_size) self.ff2 = torch.nn.Linear(hidden_size, 1, bias=False) def forward(self, input_, mask=None): """ input vector: input_ output: attn_: attention weights cv: context vector """ attn_ = torch.tanh(self.ff1(input_)) attn_ = self.ff2(attn_).squeeze(2) if mask is not None: attn_ = attn_.masked_fill(mask == 0, -1000000000.0) attn_ = torch.softmax(attn_, dim=1) ctx_vec = torch.bmm(attn_.unsqueeze(1), input_).squeeze(1) return attn_, ctx_vec def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 
0) del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), primals_3, out=buf5) return buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0 ), primals_3, buf1, buf4, primals_4 class AttentionSelfNew(torch.nn.Module): def __init__(self, input_size, hidden_size, device=torch.device('cpu')): """ implementation of self-attention. """ super().__init__() self.ff1 = torch.nn.Linear(input_size, hidden_size) self.ff2 = torch.nn.Linear(hidden_size, 1, bias=False) def forward(self, input_0): primals_1 = self.ff1.weight primals_2 = self.ff1.bias primals_4 = self.ff2.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
haophancs/TREQS
AttentionSelf
false
15,493
[ "MIT" ]
149
49e354ce2a08cf963ec139d99936020e0f80ced8
https://github.com/haophancs/TREQS/tree/49e354ce2a08cf963ec139d99936020e0f80ced8
PermEqui2_mean
import torch import torch.nn as nn class PermEqui2_mean(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui2_mean, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) def forward(self, x): xm = x.mean(1, keepdim=True) xm = self.Lambda(xm) x = self.Gamma(x) x = x - xm return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = xindex // 64 x5 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sub_1[grid(256)](buf3, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0) class PermEqui2_meanNew(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui2_meanNew, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) def forward(self, input_0): primals_2 = self.Gamma.weight primals_4 = self.Gamma.bias primals_3 = self.Lambda.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
haoruilee/DeepSets
PermEqui2_mean
false
15,495
[ "Apache-2.0" ]
213
b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
https://github.com/haoruilee/DeepSets/tree/b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
CrossAttention
import torch class CrossAttention(torch.nn.Module): """ Implement of Co-attention. """ def __init__(self): super().__init__() def forward(self, inputA, inputB, maskA=None, maskB=None): """ Input: embedding. """ inputA.size(0) assert inputA.size(-1) == inputB.size(-1) scores = torch.bmm(inputA, inputB.transpose(1, 2)) if maskA is not None and maskB is not None: maskA = maskA[:, :, None] maskB = maskB[:, None, :] mask = torch.bmm(maskA, maskB) scores = scores.masked_fill(mask == 0, -1000000000.0) attnA = torch.softmax(scores, 1) attnB = torch.softmax(scores, 2) cvA = torch.bmm(attnA.transpose(1, 2), inputA) cvB = torch.bmm(attnB, inputB) return cvA, cvB def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp0 - tmp16 tmp18 = tl_math.exp(tmp17) tl.store(out_ptr0 + x4, tmp9, xmask) tl.store(out_ptr1 + x4, tmp18, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), arg0_1, out=buf3) del arg0_1 buf5 = buf2 del buf2 triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 extern_kernels.bmm(buf5, arg1_1, out=buf6) del arg1_1 del buf5 return buf3, buf6 class CrossAttentionNew(torch.nn.Module): """ Implement of Co-attention. """ def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
haophancs/TREQS
CrossAttention
false
15,496
[ "MIT" ]
149
49e354ce2a08cf963ec139d99936020e0f80ced8
https://github.com/haophancs/TREQS/tree/49e354ce2a08cf963ec139d99936020e0f80ced8
PermEqui1_max
import torch import torch.nn as nn class PermEqui1_max(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui1_max, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) def forward(self, x): xm, _ = x.max(1, keepdim=True) x = self.Gamma(x - xm) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class PermEqui1_maxNew(nn.Module): def __init__(self, in_dim, out_dim): super(PermEqui1_maxNew, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) def forward(self, input_0): primals_2 = self.Gamma.weight primals_3 = self.Gamma.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
haoruilee/DeepSets
PermEqui1_max
false
15,497
[ "Apache-2.0" ]
213
b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
https://github.com/haoruilee/DeepSets/tree/b405dd6b51a34fb1ef622e25e6685b417b7b7cbb
CompressionFM
import torch class CompressionFM(torch.nn.Module): """ FM layer """ def __init__(self, input_size, fm_size): super(CompressionFM, self).__init__() self.LW = torch.nn.Linear(input_size, 1) self.QV = torch.nn.Parameter(torch.randn(input_size, fm_size)) def forward(self, input_): """ Factor Machine Implementation. """ size_input = input_.size() input_ = input_.contiguous().view(-1, input_.size(-1)) h0 = self.LW(input_) v1 = torch.mm(input_, self.QV) v1 = v1 * v1 v2 = torch.mm(input_ * input_, self.QV * self.QV) vcat = torch.sum(v1 - v2, 1) fm = h0.squeeze() + 0.5 * vcat fm = fm.view(size_input[0], size_input[1], 1) return fm def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'fm_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_mul_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp4 * tmp4 tmp7 = tmp5 - tmp6 tmp9 = tmp8 * tmp8 tmp11 = tmp9 - tmp10 tmp12 = tmp7 + tmp11 tmp14 = tmp13 * tmp13 tmp16 = tmp14 - tmp15 tmp17 = tmp12 + tmp16 tmp19 = tmp18 * tmp18 tmp21 = tmp19 - tmp20 tmp22 = tmp17 + tmp21 tmp23 = 0.5 tmp24 = tmp22 * tmp23 tmp25 = tmp3 + tmp24 tl.store(in_out_ptr0 + x0, tmp25, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_1[grid(16)](primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, buf3, out=buf4) del buf3 buf5 = reinterpret_tensor(buf0, (16,), (1,), 0) del buf0 
triton_poi_fused_add_mul_sub_sum_2[grid(16)](buf5, primals_3, buf1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del primals_3 return reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 1), 0 ), primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (4, 16), (1, 4), 0) class CompressionFMNew(torch.nn.Module): """ FM layer """ def __init__(self, input_size, fm_size): super(CompressionFMNew, self).__init__() self.LW = torch.nn.Linear(input_size, 1) self.QV = torch.nn.Parameter(torch.randn(input_size, fm_size)) def forward(self, input_0): primals_4 = self.QV primals_2 = self.LW.weight primals_3 = self.LW.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
haophancs/TREQS
CompressionFM
false
15,498
[ "MIT" ]
149
49e354ce2a08cf963ec139d99936020e0f80ced8
https://github.com/haophancs/TREQS/tree/49e354ce2a08cf963ec139d99936020e0f80ced8
GateLayer
import torch from torch import nn class GateLayer(nn.Module): def __init__(self, input_dim): super(GateLayer, self).__init__() self._norm_layer1 = nn.Linear(input_dim * 2, input_dim) self._norm_layer2 = nn.Linear(input_dim, 1) def forward(self, input1, input2): norm_input = self._norm_layer1(torch.cat([input1, input2], dim=-1)) gate = torch.sigmoid(self._norm_layer2(norm_input)) gated_emb = gate * input1 + (1 - gate) * input2 return gated_emb def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp6 = tl.load(in_ptr2 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf3, primals_1, primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_2, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf1, buf3, primals_5 class GateLayerNew(nn.Module): def __init__(self, input_dim): super(GateLayerNew, self).__init__() self._norm_layer1 = nn.Linear(input_dim * 2, input_dim) self._norm_layer2 = nn.Linear(input_dim, 1) def forward(self, input_0, input_1): primals_3 = self._norm_layer1.weight primals_4 = self._norm_layer1.bias primals_5 = self._norm_layer2.weight primals_6 = self._norm_layer2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6]) return output[0]
hcmus-nlp-chatbot/CRSLab
GateLayer
false
15,499
[ "MIT" ]
315
b3ab262a4ad93cbae98fe66541eb735377768a35
https://github.com/hcmus-nlp-chatbot/CRSLab/tree/b3ab262a4ad93cbae98fe66541eb735377768a35
compressedSigmoid
import torch import torch.nn as nn import torch._utils class compressedSigmoid(nn.Module): def __init__(self, para=2.0, bias=0.2): super(compressedSigmoid, self).__init__() self.para = para self.bias = bias def forward(self, x): output = 1.0 / (self.para + torch.exp(-x)) + self.bias return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_neg_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = 2.0 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 1, tl.int32) tmp6 = tmp5 / tmp4 tmp7 = 1.0 tmp8 = tmp6 * tmp7 tmp9 = 0.2 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_neg_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class compressedSigmoidNew(nn.Module): def __init__(self, para=2.0, bias=0.2): super(compressedSigmoidNew, self).__init__() self.para = para self.bias = bias def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
henbucuoshanghai/crowed-count-
compressedSigmoid
false
15,500
[ "MIT" ]
81
3353c0a8011b6b83e6e0392258a88706378b443b
https://github.com/henbucuoshanghai/crowed-count-/tree/3353c0a8011b6b83e6e0392258a88706378b443b
MultiHeadedAttention
import math import torch import torch.nn.functional as F class MultiHeadedAttention(torch.nn.Module): """ Implement of multi-head attention. """ def __init__(self, n_heads, hidden_size, drop_rate): super().__init__() assert hidden_size % n_heads == 0 self.n_dk = hidden_size // n_heads self.n_heads = n_heads self.proj_query = torch.nn.Linear(hidden_size, hidden_size) self.proj_key = torch.nn.Linear(hidden_size, hidden_size) self.proj_value = torch.nn.Linear(hidden_size, hidden_size) self.dropout = torch.nn.Dropout(drop_rate) self.proj_output = torch.nn.Linear(hidden_size, hidden_size) def forward(self, input_, mask=None): """ Input: embedding. """ batch_size = input_.size(0) query = self.proj_query(input_) query = query.view(batch_size, -1, self.n_heads, self.n_dk).transpose( 1, 2) key = self.proj_key(input_) key = key.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2) value = self.proj_value(input_) value = value.view(batch_size, -1, self.n_heads, self.n_dk).transpose( 1, 2) scores = query @ key.transpose(-2, -1) scores = scores / math.sqrt(self.n_dk) if mask is not None: mask = mask[:, None, None, :] scores = scores.masked_fill(mask == 0, -1000000000.0) attn = F.softmax(scores, dim=-1) attn = self.dropout(attn) cv = attn @ value cv = cv.transpose(1, 2) cv = cv.contiguous().view(batch_size, -1, self.n_heads * self.n_dk) return self.proj_output(cv) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'hidden_size': 4, 'drop_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) @triton.jit def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = float('-inf') tmp12 = tmp0 == tmp11 tmp13 = tmp12 == 0 tmp14 = tmp13.to(tl.int64) tmp15 = tmp14 != 0 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = triton_helpers.any(tmp18, 1)[:, None] tmp20 = tmp19 == 0 tmp21 = tmp6 / tmp10 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp22, tmp21) tl.store(out_ptr3 + (r1 + 16 * x0), tmp23, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused_1[grid(256)](buf5, buf9, 256, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf5 buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_2[grid(16, 16)](buf2, primals_7, buf10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_9 return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0 ), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_8 class MultiHeadedAttentionNew(torch.nn.Module): """ Implement of multi-head attention. """ def __init__(self, n_heads, hidden_size, drop_rate): super().__init__() assert hidden_size % n_heads == 0 self.n_dk = hidden_size // n_heads self.n_heads = n_heads self.proj_query = torch.nn.Linear(hidden_size, hidden_size) self.proj_key = torch.nn.Linear(hidden_size, hidden_size) self.proj_value = torch.nn.Linear(hidden_size, hidden_size) self.dropout = torch.nn.Dropout(drop_rate) self.proj_output = torch.nn.Linear(hidden_size, hidden_size) def forward(self, input_0): primals_2 = self.proj_query.weight primals_3 = self.proj_query.bias primals_4 = self.proj_key.weight primals_5 = self.proj_key.bias primals_6 = self.proj_value.weight primals_7 = self.proj_value.bias primals_8 = self.proj_output.weight primals_9 = self.proj_output.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
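A minimal usage sketch for the compiled module above. The constructor arguments are guesses consistent with the (4, 4) projection weights asserted inside call() (hidden_size=4; n_heads=4 and drop_rate=0.0 are assumptions, not taken from the source), and a CUDA device is required because the Triton kernels run on device 0:

import torch

# Hypothetical smoke test; argument values are illustrative assumptions.
attn = MultiHeadedAttentionNew(n_heads=4, hidden_size=4, drop_rate=0.0).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # shape matches the assert_size_stride on primals_1
out = attn(x)
print(out.shape)  # the graph returns a (4, 16, 4) tensor per the final reinterpret_tensor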
haophancs/TREQS
MultiHeadedAttention
false
15,501
[ "MIT" ]
149
49e354ce2a08cf963ec139d99936020e0f80ced8
https://github.com/haophancs/TREQS/tree/49e354ce2a08cf963ec139d99936020e0f80ced8
MultiHeadAttention
import math import torch import torch.cuda from torch import nn import torch.distributed import torch.utils.data import torch.optim class MultiHeadAttention(nn.Module): """ Multi-head scaled dot-product attention layer. Args: hidden_size: size of the embeddings in the model, also known as d_model num_attention_heads: number of heads in multi-head attention attn_score_dropout: probability of dropout applied to attention scores attn_layer_dropout: probability of dropout applied to the output of the whole layer, but before layer normalization """ def __init__(self, hidden_size, num_attention_heads, attn_score_dropout =0.0, attn_layer_dropout=0.0): super().__init__() if hidden_size % num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (hidden_size, num_attention_heads)) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.attn_head_size = int(hidden_size / num_attention_heads) self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size)) self.query_net = nn.Linear(hidden_size, hidden_size) self.key_net = nn.Linear(hidden_size, hidden_size) self.value_net = nn.Linear(hidden_size, hidden_size) self.out_projection = nn.Linear(hidden_size, hidden_size) self.attn_dropout = nn.Dropout(attn_score_dropout) self.layer_dropout = nn.Dropout(attn_layer_dropout) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attn_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, queries, keys, values, attention_mask): query = self.query_net(queries) key = self.key_net(keys) value = self.value_net(values) query = self.transpose_for_scores(query) / self.attn_scale key = self.transpose_for_scores(key) / self.attn_scale value = self.transpose_for_scores(value) attention_scores = torch.matmul(query, key.transpose(-1, -2)) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = torch.softmax(attention_scores, dim=-1) attention_probs = self.attn_dropout(attention_probs) context = torch.matmul(attention_probs, value) context = context.permute(0, 2, 1, 3).contiguous() new_context_shape = context.size()[:-2] + (self.hidden_size,) context = context.view(*new_context_shape) output_states = self.out_projection(context) output_states = self.layer_dropout(output_states) return output_states def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'num_attention_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.cuda from torch import nn import torch.distributed import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_div_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 
16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_div_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_div_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](buf5, primals_10, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_add_2[grid(256)](buf8, primals_10, buf6, buf7, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del primals_10 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_3[grid(16, 4)](buf2, primals_8, buf9, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_12, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_12 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadAttentionNew(nn.Module): """ Multi-head scaled dot-product attention layer. Args: hidden_size: size of the embeddings in the model, also known as d_model num_attention_heads: number of heads in multi-head attention attn_score_dropout: probability of dropout applied to attention scores attn_layer_dropout: probability of dropout applied to the output of the whole layer, but before layer normalization """ def __init__(self, hidden_size, num_attention_heads, attn_score_dropout =0.0, attn_layer_dropout=0.0): super().__init__() if hidden_size % num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (hidden_size, num_attention_heads)) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.attn_head_size = int(hidden_size / num_attention_heads) self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size)) self.query_net = nn.Linear(hidden_size, hidden_size) self.key_net = nn.Linear(hidden_size, hidden_size) self.value_net = nn.Linear(hidden_size, hidden_size) self.out_projection = nn.Linear(hidden_size, hidden_size) self.attn_dropout = nn.Dropout(attn_score_dropout) self.layer_dropout = nn.Dropout(attn_layer_dropout) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attn_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2, input_3): primals_1 = self.query_net.weight primals_2 = self.query_net.bias primals_4 = self.key_net.weight primals_5 = self.key_net.bias primals_7 = self.value_net.weight primals_8 = self.value_net.bias primals_11 = self.out_projection.weight primals_12 = self.out_projection.bias primals_3 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
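A usage sketch comparing the eager MultiHeadAttention with the compiled MultiHeadAttentionNew. The weight copy and the allclose check are an assumption about numerical equivalence rather than something stated in the source; shapes and init arguments follow get_inputs()/get_init_inputs(), and a CUDA device is assumed for the Triton kernels:

import torch

torch.manual_seed(0)
eager = MultiHeadAttention(hidden_size=4, num_attention_heads=4).cuda()
fast = MultiHeadAttentionNew(hidden_size=4, num_attention_heads=4).cuda()
fast.load_state_dict(eager.state_dict())  # same projection weights on both paths
q, k, v, mask = (torch.rand(4, 4, 4, device='cuda') for _ in range(4))  # per get_inputs()
ref = eager(q, k, v, mask)
out = fast(q, k, v, mask)
print(torch.allclose(ref, out, atol=1e-5))  # expected to print True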
hamjam/NeMo
MultiHeadAttention
false
15,502
[ "Apache-2.0" ]
4,145
b3484d32e1317666151f931bfa39867d88ed8658
https://github.com/hamjam/NeMo/tree/b3484d32e1317666151f931bfa39867d88ed8658
RankingLoss
import torch import torch.nn.functional as F from abc import abstractmethod import torch.utils.data.dataloader import torch.nn as nn import torch.nn class SimilarityLoss(nn.Module): def __init__(self): super(SimilarityLoss, self).__init__() @abstractmethod def forward(self, inputs, targets): pass class RankingLoss(SimilarityLoss): """ Triplet ranking loss between pair similarities and pair labels. """ def __init__(self, margin=0.1, direction_weights=[0.5, 0.5]): super(RankingLoss, self).__init__() self.margin = margin self.direction_weights = direction_weights def forward(self, inputs, targets): n = inputs.shape[0] neg_targets = torch.ones_like(targets) - targets ranking_loss_matrix_01 = neg_targets * F.relu(self.margin + inputs - torch.diag(inputs).view(n, 1)) ranking_loss_matrix_10 = neg_targets * F.relu(self.margin + inputs - torch.diag(inputs).view(1, n)) neg_targets_01_sum = torch.sum(neg_targets, dim=1) neg_targets_10_sum = torch.sum(neg_targets, dim=0) loss = self.direction_weights[0] * torch.mean(torch.sum( ranking_loss_matrix_01 / neg_targets_01_sum, dim=1) ) + self.direction_weights[1] * torch.mean(torch.sum( ranking_loss_matrix_10 / neg_targets_10_sum, dim=0)) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from abc import abstractmethod import torch.utils.data.dataloader import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 5 * r0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + 1) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp18 = tl.load(in_ptr0 + 2) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp22 = tl.load(in_ptr0 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp27 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + 4) tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.load(in_ptr0 + 5) tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp41 = tl.load(in_ptr0 + 6) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp45 = tl.load(in_ptr0 + 7) tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp51 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr0 + 8) tmp59 = tl.broadcast_to(tmp58, [XBLOCK, RBLOCK]) tmp61 = tl.load(in_ptr0 + 9) tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp65 = tl.load(in_ptr0 + 10) tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK]) tmp69 = tl.load(in_ptr0 + 11) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK]) tmp75 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp77 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr0 + 12) tmp83 = tl.broadcast_to(tmp82, [XBLOCK, RBLOCK]) tmp85 = tl.load(in_ptr0 + 13) tmp86 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK]) tmp89 = tl.load(in_ptr0 + 14) tmp90 = tl.broadcast_to(tmp89, [XBLOCK, RBLOCK]) tmp93 = tl.load(in_ptr0 + 15) tmp94 = tl.broadcast_to(tmp93, [XBLOCK, RBLOCK]) tmp99 = tl.load(in_ptr0 + r0, None) tmp101 = tl.load(in_ptr1 + r0, None) tmp106 = tl.load(in_ptr0 + (4 + r0), None) tmp109 = tl.load(in_ptr0 + (8 + r0), None) tmp112 = tl.load(in_ptr0 + (12 + r0), None) tmp116 = tl.load(in_ptr1 + (4 + r0), None) tmp123 = tl.load(in_ptr1 + (8 + r0), None) tmp130 = tl.load(in_ptr1 + (12 + r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl.full([1, 1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tmp2 * tmp9 tmp13 = tmp1 - tmp12 tmp16 = tmp1 - tmp15 tmp17 = tmp13 + tmp16 tmp20 = tmp1 - tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp1 - tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp10 / tmp25 tmp28 = tmp1 - tmp27 tmp30 = tmp29 + tmp4 tmp31 = tmp30 - tmp6 tmp32 = triton_helpers.maximum(tmp8, tmp31) tmp33 
= tmp28 * tmp32 tmp36 = tmp1 - tmp35 tmp39 = tmp1 - tmp38 tmp40 = tmp36 + tmp39 tmp43 = tmp1 - tmp42 tmp44 = tmp40 + tmp43 tmp47 = tmp1 - tmp46 tmp48 = tmp44 + tmp47 tmp49 = tmp33 / tmp48 tmp50 = tmp26 + tmp49 tmp52 = tmp1 - tmp51 tmp54 = tmp53 + tmp4 tmp55 = tmp54 - tmp6 tmp56 = triton_helpers.maximum(tmp8, tmp55) tmp57 = tmp52 * tmp56 tmp60 = tmp1 - tmp59 tmp63 = tmp1 - tmp62 tmp64 = tmp60 + tmp63 tmp67 = tmp1 - tmp66 tmp68 = tmp64 + tmp67 tmp71 = tmp1 - tmp70 tmp72 = tmp68 + tmp71 tmp73 = tmp57 / tmp72 tmp74 = tmp50 + tmp73 tmp76 = tmp1 - tmp75 tmp78 = tmp77 + tmp4 tmp79 = tmp78 - tmp6 tmp80 = triton_helpers.maximum(tmp8, tmp79) tmp81 = tmp76 * tmp80 tmp84 = tmp1 - tmp83 tmp87 = tmp1 - tmp86 tmp88 = tmp84 + tmp87 tmp91 = tmp1 - tmp90 tmp92 = tmp88 + tmp91 tmp95 = tmp1 - tmp94 tmp96 = tmp92 + tmp95 tmp97 = tmp81 / tmp96 tmp98 = tmp74 + tmp97 tmp100 = tmp1 - tmp99 tmp102 = tmp101 + tmp4 tmp103 = tmp102 - tmp6 tmp104 = triton_helpers.maximum(tmp8, tmp103) tmp105 = tmp100 * tmp104 tmp107 = tmp1 - tmp106 tmp108 = tmp100 + tmp107 tmp110 = tmp1 - tmp109 tmp111 = tmp108 + tmp110 tmp113 = tmp1 - tmp112 tmp114 = tmp111 + tmp113 tmp115 = tmp105 / tmp114 tmp117 = tmp116 + tmp4 tmp118 = tmp117 - tmp6 tmp119 = triton_helpers.maximum(tmp8, tmp118) tmp120 = tmp107 * tmp119 tmp121 = tmp120 / tmp114 tmp122 = tmp115 + tmp121 tmp124 = tmp123 + tmp4 tmp125 = tmp124 - tmp6 tmp126 = triton_helpers.maximum(tmp8, tmp125) tmp127 = tmp110 * tmp126 tmp128 = tmp127 / tmp114 tmp129 = tmp122 + tmp128 tmp131 = tmp130 + tmp4 tmp132 = tmp131 - tmp6 tmp133 = triton_helpers.maximum(tmp8, tmp132) tmp134 = tmp113 * tmp133 tmp135 = tmp134 / tmp114 tmp136 = tmp129 + tmp135 tmp137 = tl.broadcast_to(tmp98, [XBLOCK, RBLOCK]) tmp139 = tl.sum(tmp137, 1)[:, None] tmp140 = tl.broadcast_to(tmp136, [XBLOCK, RBLOCK]) tmp142 = tl.sum(tmp140, 1)[:, None] tmp143 = 4.0 tmp144 = tmp139 / tmp143 tmp145 = 0.5 tmp146 = tmp144 * tmp145 tmp147 = tmp142 / tmp143 tmp148 = tmp147 * tmp145 tmp149 = tmp146 + tmp148 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp149, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf4 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0[grid(1)]( buf4, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class SimilarityLoss(nn.Module): def __init__(self): super(SimilarityLoss, self).__init__() @abstractmethod def forward(self, inputs, targets): pass class RankingLossNew(SimilarityLoss): """ Triplet ranking loss between pair similarities and pair labels. """ def __init__(self, margin=0.1, direction_weights=[0.5, 0.5]): super(RankingLossNew, self).__init__() self.margin = margin self.direction_weights = direction_weights def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
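A usage sketch for this pair; the comparison against the eager RankingLoss encodes the assumption that the fused kernel (which hard-codes the default margin 0.1 and weights 0.5/0.5) reproduces the eager loss, and CUDA is required:

import torch

sims = torch.rand(4, 4, device='cuda')      # pairwise similarities, per get_inputs()
labels = torch.rand(4, 4, device='cuda')    # pair labels
ref = RankingLoss()(sims, labels)
out = RankingLossNew()(sims, labels)
print(torch.allclose(ref, out, atol=1e-5))  # expected to print True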
helloMLWo/daga
RankingLoss
false
15,503
[ "MIT" ]
46
88c7a1776ff36bd1abe1026103454e23ec77b552
https://github.com/helloMLWo/daga/tree/88c7a1776ff36bd1abe1026103454e23ec77b552
CNN
import torch import torch.nn as nn import torch._utils class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.cnn = nn.Conv2d(1, 1, 3, stride=1, padding=1) def forward(self, input): output = self.cnn(input) return output def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class CNNNew(nn.Module): def __init__(self): super(CNNNew, self).__init__() self.cnn = nn.Conv2d(1, 1, 3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.cnn.weight primals_2 = self.cnn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
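A usage sketch; the weights are copied across so the eager and compiled modules can be compared on the same parameters. The equivalence check is an expectation, not a claim from the source, and CUDA is required for the fused bias kernel:

import torch

eager = CNN().cuda()
fast = CNNNew().cuda()
fast.load_state_dict(eager.state_dict())     # reuse the same 3x3 kernel and bias
x = torch.rand(4, 1, 64, 64, device='cuda')  # shape per get_inputs()
print(torch.allclose(eager(x), fast(x), atol=1e-5))  # expected to print True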
henbucuoshanghai/crowed-count-
CNN
false
15,504
[ "MIT" ]
81
3353c0a8011b6b83e6e0392258a88706378b443b
https://github.com/henbucuoshanghai/crowed-count-/tree/3353c0a8011b6b83e6e0392258a88706378b443b
ScaledDotProductAttention
import torch import numpy as np import torch.utils.data class ScaledDotProductAttention(torch.nn.Module): """ Scaled, softmax attention module for Transformer as defined by Attention(Q, K, V) on pg 4. Returns the final attention vectors as well as the attention matrices (pairwise scores). """ def __init__(self): super(ScaledDotProductAttention, self).__init__() self.softmax = torch.nn.Softmax(dim=-1) def forward(self, Q, K, V, mask=None, dropout=None): scores = torch.matmul(Q, K.transpose(-2, -1)) scores = scores / np.sqrt(K.shape[-1]) if mask is not None: scores = scores.masked_fill(mask == 0, -np.inf) scores = self.softmax(scores) if dropout is not None: scores = dropout(scores) return torch.matmul(scores, V), scores def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 2.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6.to(tl.float64) tmp21 = tmp20 * tmp1 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_sqrt_0[grid(256)](buf0, buf1, 256, XBLOCK =256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 
4), (64, 16, 4, 1), 0), buf2 class ScaledDotProductAttentionNew(torch.nn.Module): """ Scaled, softmax attention module for Transformer as defined by Attention(Q, K, V) on pg 4. Returns the final attention vectors as well as the attention matrices (pairwise scores). """ def __init__(self): super(ScaledDotProductAttentionNew, self).__init__() self.softmax = torch.nn.Softmax(dim=-1) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
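A usage sketch exercising both return values (attention output and score matrix); the allclose checks encode the assumption that the fused softmax matches the eager one, and a CUDA device is assumed:

import torch

q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))  # per get_inputs()
ref_out, ref_scores = ScaledDotProductAttention()(q, k, v)
new_out, new_scores = ScaledDotProductAttentionNew()(q, k, v)
print(torch.allclose(ref_out, new_out, atol=1e-5),
      torch.allclose(ref_scores, new_scores, atol=1e-5))  # expected: True True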
hengwei-chan/protein_transformer
ScaledDotProductAttention
false
15,505
[ "BSD-3-Clause" ]
77
988bb0fcbb94b37e5a02071bd345ea073ad605f8
https://github.com/hengwei-chan/protein_transformer/tree/988bb0fcbb94b37e5a02071bd345ea073ad605f8
CMVN
import torch import torch.nn as nn class CMVN(nn.Module): __constants__ = ['mode', 'dim', 'eps'] def __init__(self, mode='global', dim=2, eps=1e-10): super(CMVN, self).__init__() if mode != 'global': raise NotImplementedError( 'Only support global mean variance normalization.') self.mode = mode self.dim = dim self.eps = eps def forward(self, x): if self.mode == 'global': return (x - x.mean(self.dim, keepdim=True)) / (self.eps + x.std (self.dim, keepdim=True)) def extra_repr(self): return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-10 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + x3, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CMVNNew(nn.Module): __constants__ = ['mode', 'dim', 'eps'] def __init__(self, mode='global', dim=2, eps=1e-10): super(CMVNNew, self).__init__() if mode != 'global': raise NotImplementedError( 'Only support global mean variance normalization.') self.mode = mode self.dim = dim self.eps = eps def extra_repr(self): return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
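A usage sketch; the eager module normalizes over dim=2 with an unbiased std, which is what the kernel's divide-by-3 reproduces for a length-4 axis. The equivalence itself is an expectation, and CUDA is assumed:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')  # per get_inputs()
ref = CMVN()(x)
out = CMVNNew()(x)
print(torch.allclose(ref, out, atol=1e-5))  # expected to print True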
hhhaaahhhaa/s3prl
CMVN
false
15,506
[ "Apache-2.0" ]
856
a469787f05c42196c4d989555082f5fd9dcbe8a6
https://github.com/hhhaaahhhaa/s3prl/tree/a469787f05c42196c4d989555082f5fd9dcbe8a6
CAM
import torch import torch.nn as nn import torch.nn.functional as F import torch._utils class CAM(nn.Module): def __init__(self, in_dim): super(CAM, self).__init__() self.para_mu = nn.Parameter(torch.zeros(1)) def forward(self, x): N, C, H, W = x.size() proj_query = x.view(N, C, -1) proj_key = x.view(N, C, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy attention = F.softmax(energy, dim=-1) proj_value = x.view(N, C, -1) out = torch.bmm(attention, proj_value) out = out.view(N, C, H, W) out = self.para_mu * out + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr2 + (x0 + 16 * x2 + 64 * x1), xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._scaled_dot_product_efficient_attention.default( reinterpret_tensor(primals_1, (1, 4, 4, 16), (256, 64, 16, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4, 16), (256, 64, 16, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4, 16), (256, 64, 16, 1), 0), None, False, scale=1.0) buf1 = buf0[0] del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (16, 64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_2, buf1, primals_1, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf5, reinterpret_tensor(buf1, (4, 4, 4, 4), (16, 64, 4, 1), 0) class CAMNew(nn.Module): def __init__(self, in_dim): super(CAMNew, self).__init__() self.para_mu = nn.Parameter(torch.zeros(1)) def forward(self, input_0): primals_2 = self.para_mu primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
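A usage sketch; para_mu is copied so both modules start from the same (zero-initialised) scalar, and the comparison assumes the scaled_dot_product_efficient_attention path matches the eager CAM's explicit bmm/softmax. CUDA is required:

import torch

eager = CAM(in_dim=4).cuda()
fast = CAMNew(in_dim=4).cuda()
fast.load_state_dict(eager.state_dict())   # single learnable scalar para_mu
x = torch.rand(4, 4, 4, 4, device='cuda')  # per get_inputs()
print(torch.allclose(eager(x), fast(x), atol=1e-5))  # expected to print True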
henbucuoshanghai/crowed-count-
CAM
false
15,507
[ "MIT" ]
81
3353c0a8011b6b83e6e0392258a88706378b443b
https://github.com/henbucuoshanghai/crowed-count-/tree/3353c0a8011b6b83e6e0392258a88706378b443b
SelfAttentionBatch
import torch from torch import nn import torch.nn.functional as F class SelfAttentionBatch(nn.Module): def __init__(self, dim, da, alpha=0.2, dropout=0.5): super(SelfAttentionBatch, self).__init__() self.dim = dim self.da = da self.alpha = alpha self.dropout = dropout self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)), requires_grad=True) self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad =True) nn.init.xavier_uniform_(self.a.data, gain=1.414) nn.init.xavier_uniform_(self.b.data, gain=1.414) def forward(self, h): e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze( dim=1) attention = F.softmax(e, dim=0) return torch.matmul(attention, h) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'da': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_3, out=buf2) buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0), primals_2, out=buf6) del buf5 return reinterpret_tensor(buf6, (4,), (1,), 0 ), buf1, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0) class SelfAttentionBatchNew(nn.Module): def __init__(self, dim, da, alpha=0.2, dropout=0.5): super(SelfAttentionBatchNew, self).__init__() self.dim = dim self.da = da self.alpha = alpha self.dropout = dropout self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)), requires_grad=True) self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad =True) nn.init.xavier_uniform_(self.a.data, gain=1.414) nn.init.xavier_uniform_(self.b.data, gain=1.414) def forward(self, input_0): primals_1 = self.a primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
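A usage sketch; the attention parameters a and b are copied across so the eager and compiled modules run on the same weights. The equivalence check is an expectation rather than a guarantee from the source, and CUDA is required:

import torch

torch.manual_seed(0)
eager = SelfAttentionBatch(dim=4, da=4).cuda()
fast = SelfAttentionBatchNew(dim=4, da=4).cuda()
fast.load_state_dict(eager.state_dict())  # share the xavier-initialised a and b
h = torch.rand(4, 4, device='cuda')       # per get_inputs()
print(torch.allclose(eager(h), fast(h), atol=1e-5))  # expected to print True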
hcmus-nlp-chatbot/CRSLab
SelfAttentionBatch
false
15,508
[ "MIT" ]
315
b3ab262a4ad93cbae98fe66541eb735377768a35
https://github.com/hcmus-nlp-chatbot/CRSLab/tree/b3ab262a4ad93cbae98fe66541eb735377768a35
ACELoss
import torch import torch.nn as nn class ACELoss(nn.Module): """ Ref: [1] Aggregation Cross-Entropy for Sequence Recognition. CVPR-2019 """ def __init__(self, character, eps=1e-10): """ Args: character (dict): recognition dictionary eps (float): margin of error """ super(ACELoss, self).__init__() self.dict = character self.eps = eps def forward(self, inputs, label): """ Args: inputs (Torch.Tensor): model output label (Torch.Tensor): label information Returns: Torch.Tensor: ace loss """ batch, time_dim, _ = inputs.size() inputs = inputs + self.eps label = label.float() label[:, 0] = time_dim - label[:, 0] inputs = torch.sum(inputs, 1) inputs = inputs / time_dim label = label / time_dim loss = -torch.sum(torch.log(inputs) * label) / batch return loss def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'character': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_copy_rsub_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = 4.0 tmp2 = tmp1 - tmp0 tl.store(out_ptr1 + (x0 + 64 * x1), tmp2, xmask) @triton.jit def triton_per_fused_add_div_log_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + r3, None) tmp1 = 1e-10 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tmp14 = tl_math.log(tmp13) tmp16 = tmp15 * tmp12 tmp17 = tmp14 * tmp16 tmp18 = tl.broadcast_to(tmp17, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tmp21 = -tmp20 tmp22 = tmp21 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_copy_rsub_0[grid(64)](arg1_1, arg1_1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_log_mul_neg_sum_1[grid(1)](buf4, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class ACELossNew(nn.Module): """ Ref: [1] Aggregation Cross-Entropy for Sequence Recognition. CVPR-2019 """ def __init__(self, character, eps=1e-10): """ Args: character (dict): recognition dictionary eps (float): margin of error """ super(ACELossNew, self).__init__() self.dict = character self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
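A usage sketch; note that both the eager and the compiled loss modify the label tensor in place (the eager code rewrites label[:, 0], and the first Triton kernel writes back into arg1_1), so each call gets its own clone here. The equivalence check is an assumption, and CUDA is required:

import torch

inputs = torch.rand(4, 4, 4, device='cuda')         # model output, per get_inputs()
label = torch.rand(4, 4, 4, 4, device='cuda')       # label tensor, per get_inputs()
ref = ACELoss(character=4)(inputs, label.clone())   # clone: forward mutates label in place
out = ACELossNew(character=4)(inputs, label.clone())
print(torch.allclose(ref, out, atol=1e-5))           # expected to print True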
hikopensource/DAVAR-Lab-OCR
ACELoss
false
15,509
[ "Apache-2.0" ]
387
c65285f6668864cca7a12770ae4c8d083ea1cf1b
https://github.com/hikopensource/DAVAR-Lab-OCR/tree/c65285f6668864cca7a12770ae4c8d083ea1cf1b
MultiscalePixelLoss
import torch import torch.nn as nn class MultiscalePixelLoss(nn.Module): def __init__(self, loss_f=nn.L1Loss(), scale=5): super(MultiscalePixelLoss, self).__init__() self.criterion = loss_f self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False) self.weights = [1, 0.5, 0.25, 0.125, 0.125] self.weights = self.weights[:scale] def forward(self, x: 'torch.Tensor', y: 'torch.Tensor', mask=None ) ->torch.Tensor: loss = 0 if mask is not None: mask = mask.expand(-1, x.size()[1], -1, -1) for i in range(len(self.weights)): if mask is not None: loss += self.weights[i] * self.criterion(x * mask, y * mask) else: loss += self.weights[i] * self.criterion(x, y) if i != len(self.weights) - 1: x = self.downsample(x) y = self.downsample(y) if mask is not None: mask = self.downsample(mask) return loss def get_inputs(): return [torch.rand([4, 4, 64, 64]), torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_red_fused_abs_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 8 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = _tmp5 + tmp4 _tmp5 = tl.where(rmask & xmask, tmp6, _tmp5) tmp5 = tl.sum(_tmp5, 1)[:, None] tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_per_fused_abs_mean_sub_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_red_fused_abs_avg_pool2d_mean_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. 
constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex % 32 r2 = rindex // 32 r3 = rindex tmp0 = tl.load(in_ptr0 + (2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (1 + 2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (64 + 2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr0 + (65 + 2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (1 + 2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr1 + (64 + 2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (65 + 2 * r1 + 128 * r2 + 32768 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp11 = tmp10 + tmp9 tmp13 = tmp12 + tmp11 tmp15 = tmp14 + tmp13 tmp16 = tmp15 * tmp7 tmp17 = tmp8 - tmp16 tmp18 = tl_math.abs(tmp17) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tl.store(out_ptr0 + (r3 + 8192 * x0), tmp8, rmask & xmask) tl.store(out_ptr1 + (r3 + 8192 * x0), tmp16, rmask & xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr2 + x0, tmp20, xmask) @triton.jit def triton_per_fused_abs_mean_sub_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_red_fused_abs_avg_pool2d_mean_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. 
constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (2 * r0 + 64 * r1), rmask, eviction_policy ='evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (1 + 2 * r0 + 64 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (32 + 2 * r0 + 64 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr0 + (33 + 2 * r0 + 64 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (2 * r0 + 64 * r1), rmask, eviction_policy ='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (1 + 2 * r0 + 64 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr1 + (32 + 2 * r0 + 64 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (33 + 2 * r0 + 64 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp11 = tmp10 + tmp9 tmp13 = tmp12 + tmp11 tmp15 = tmp14 + tmp13 tmp16 = tmp15 * tmp7 tmp17 = tmp8 - tmp16 tmp18 = tl_math.abs(tmp17) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask, tmp21, _tmp20) tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp8, rmask) tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, rmask ) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) @triton.jit def triton_red_fused_abs_avg_pool2d_mean_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. 
constexpr): rnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r0 = rindex % 8 r1 = rindex // 8 r2 = rindex tmp0 = tl.load(in_ptr0 + (2 * r0 + 32 * r1), rmask, eviction_policy ='evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (1 + 2 * r0 + 32 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (16 + 2 * r0 + 32 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr0 + (17 + 2 * r0 + 32 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (2 * r0 + 32 * r1), rmask, eviction_policy ='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (1 + 2 * r0 + 32 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr1 + (16 + 2 * r0 + 32 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (17 + 2 * r0 + 32 * r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp11 = tmp10 + tmp9 tmp13 = tmp12 + tmp11 tmp15 = tmp14 + tmp13 tmp16 = tmp15 * tmp7 tmp17 = tmp8 - tmp16 tmp18 = tl_math.abs(tmp17) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask, tmp21, _tmp20) tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp8, rmask) tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, rmask ) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) @triton.jit def triton_per_fused_abs_add_avg_pool2d_mean_mul_sub_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + (2 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (2 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + (1 + 2 * r0 + 16 * r1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr1 + (8 + 2 * r0 + 16 * r1), None, eviction_policy ='evict_last') tmp14 = tl.load(in_ptr1 + (9 + 2 * r0 + 16 * r1), None, eviction_policy ='evict_last') tmp22 = tl.load(in_out_ptr0 + 0) tmp23 = tl.broadcast_to(tmp22, [1]) tmp30 = tl.load(in_ptr2 + 0) tmp31 = tl.broadcast_to(tmp30, [1]) tmp37 = tl.load(in_ptr3 + 0) tmp38 = tl.broadcast_to(tmp37, [1]) tmp43 = tl.load(in_ptr4 + 0) tmp44 = tl.broadcast_to(tmp43, [1]) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp11 = tmp10 + tmp9 tmp13 = tmp12 + tmp11 tmp15 = tmp14 + tmp13 tmp16 = tmp15 * tmp7 tmp17 = tmp8 - tmp16 tmp18 = tl_math.abs(tmp17) tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp24 = 65536.0 tmp25 = tmp23 / tmp24 tmp26 = 1.0 tmp27 = tmp25 * tmp26 tmp28 = 0.0 tmp29 = tmp27 + tmp28 tmp32 = 16384.0 tmp33 = tmp31 / tmp32 tmp34 = 0.5 tmp35 = 
tmp33 * tmp34 tmp36 = tmp29 + tmp35 tmp39 = 4096.0 tmp40 = tmp38 / tmp39 tmp41 = tmp40 * tmp7 tmp42 = tmp36 + tmp41 tmp45 = 1024.0 tmp46 = tmp44 / tmp45 tmp47 = 0.125 tmp48 = tmp46 * tmp47 tmp49 = tmp42 + tmp48 tmp50 = 256.0 tmp51 = tmp21 / tmp50 tmp52 = tmp51 * tmp47 tmp53 = tmp49 + tmp52 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp53, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(arg1_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8,), (1,), torch.float32) get_raw_stream(0) triton_red_fused_abs_mean_sub_0[grid(8)](arg1_1, arg0_1, buf0, 8, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf1 = empty_strided_cuda((), (), torch.float32) triton_per_fused_abs_mean_sub_1[grid(1)](buf0, buf1, 1, 8, XBLOCK=1, num_warps=2, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf4 = empty_strided_cuda((2,), (1,), torch.float32) triton_red_fused_abs_avg_pool2d_mean_sub_2[grid(2)](arg1_1, arg0_1, buf2, buf3, buf4, 2, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del arg0_1 del arg1_1 buf5 = empty_strided_cuda((), (), torch.float32) triton_per_fused_abs_mean_sub_3[grid(1)](buf4, buf5, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf4 buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) buf7 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) buf8 = empty_strided_cuda((), (), torch.float32) triton_red_fused_abs_avg_pool2d_mean_sub_4[grid(1)](buf2, buf3, buf6, buf7, buf8, 1, 4096, XBLOCK=1, RBLOCK=4096, num_warps=16, num_stages=1) del buf2 del buf3 buf9 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32 ) buf11 = empty_strided_cuda((), (), torch.float32) triton_red_fused_abs_avg_pool2d_mean_sub_5[grid(1)](buf6, buf7, buf9, buf10, buf11, 1, 1024, XBLOCK=1, RBLOCK=1024, num_warps=8, num_stages=1) del buf6 del buf7 buf13 = buf1 del buf1 triton_per_fused_abs_add_avg_pool2d_mean_mul_sub_6[grid(1)](buf13, buf9, buf10, buf5, buf8, buf11, 1, 256, num_warps=2, num_stages=1) del buf10 del buf11 del buf5 del buf8 del buf9 return buf13, class MultiscalePixelLossNew(nn.Module): def __init__(self, loss_f=nn.L1Loss(), scale=5): super(MultiscalePixelLossNew, self).__init__() self.criterion = loss_f self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False) self.weights = [1, 0.5, 0.25, 0.125, 0.125] self.weights = self.weights[:scale] def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
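A usage sketch for the five-scale L1 comparison; the defaults (L1Loss, scale=5, the 1/0.5/0.25/0.125/0.125 weights) match the constants fused into the kernels, and the allclose check is an expectation rather than a guarantee. CUDA is required:

import torch

x = torch.rand(4, 4, 64, 64, device='cuda')  # per get_inputs()
y = torch.rand(4, 4, 64, 64, device='cuda')
ref = MultiscalePixelLoss()(x, y)
out = MultiscalePixelLossNew()(x, y)
print(torch.allclose(ref, out, atol=1e-5))   # expected to print True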
grofit/traiNNer
MultiscalePixelLoss
false
15510
[ "Apache-2.0" ]
78
12d006fd44ed304e4178839c53b1f3d95ca25dcb
https://github.com/grofit/traiNNer/tree/12d006fd44ed304e4178839c53b1f3d95ca25dcb
TransformerNet
import torch class ConvLayer(torch.nn.Module): """ A small wrapper around nn.Conv2d, so as to make the code cleaner and allow for experimentation with padding """ def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=kernel_size // 2, padding_mode= 'reflect') def forward(self, x): return self.conv2d(x) class ResidualBlock(torch.nn.Module): """ Originally introduced in (Microsoft Research Asia, He et al.): https://arxiv.org/abs/1512.03385 Modified architecture according to suggestions in this blog: http://torch.ch/blog/2016/02/04/resnets.html The only difference from the original is: There is no ReLU layer after the addition of identity and residual """ def __init__(self, channels): super(ResidualBlock, self).__init__() kernel_size = 3 stride_size = 1 self.conv1 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=stride_size) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=stride_size) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) return out + residual class UpsampleConvLayer(torch.nn.Module): """ Nearest-neighbor up-sampling followed by a convolution Appears to give better results than learned up-sampling aka transposed conv (avoids the checkerboard artifact) Initially proposed on distill pub: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() self.upsampling_factor = stride self.conv2d = ConvLayer(in_channels, out_channels, kernel_size, stride=1) def forward(self, x): if self.upsampling_factor > 1: x = torch.nn.functional.interpolate(x, scale_factor=self. 
upsampling_factor, mode='nearest') return self.conv2d(x) class TransformerNet(torch.nn.Module): def __init__(self): super().__init__() self.relu = torch.nn.ReLU() num_of_channels = [3, 32, 64, 128] kernel_sizes = [9, 3, 3] stride_sizes = [1, 2, 2] self.conv1 = ConvLayer(num_of_channels[0], num_of_channels[1], kernel_size=kernel_sizes[0], stride=stride_sizes[0]) self.in1 = torch.nn.InstanceNorm2d(num_of_channels[1], affine=True) self.conv2 = ConvLayer(num_of_channels[1], num_of_channels[2], kernel_size=kernel_sizes[1], stride=stride_sizes[1]) self.in2 = torch.nn.InstanceNorm2d(num_of_channels[2], affine=True) self.conv3 = ConvLayer(num_of_channels[2], num_of_channels[3], kernel_size=kernel_sizes[2], stride=stride_sizes[2]) self.in3 = torch.nn.InstanceNorm2d(num_of_channels[3], affine=True) res_block_num_of_filters = 128 self.res1 = ResidualBlock(res_block_num_of_filters) self.res2 = ResidualBlock(res_block_num_of_filters) self.res3 = ResidualBlock(res_block_num_of_filters) self.res4 = ResidualBlock(res_block_num_of_filters) self.res5 = ResidualBlock(res_block_num_of_filters) num_of_channels.reverse() kernel_sizes.reverse() stride_sizes.reverse() self.up1 = UpsampleConvLayer(num_of_channels[0], num_of_channels[1], kernel_size=kernel_sizes[0], stride=stride_sizes[0]) self.in4 = torch.nn.InstanceNorm2d(num_of_channels[1], affine=True) self.up2 = UpsampleConvLayer(num_of_channels[1], num_of_channels[2], kernel_size=kernel_sizes[1], stride=stride_sizes[1]) self.in5 = torch.nn.InstanceNorm2d(num_of_channels[2], affine=True) self.up3 = ConvLayer(num_of_channels[2], num_of_channels[3], kernel_size=kernel_sizes[2], stride=stride_sizes[2]) def forward(self, x): y = self.relu(self.in1(self.conv1(x))) y = self.relu(self.in2(self.conv2(y))) y = self.relu(self.in3(self.conv3(y))) y = self.res1(y) y = self.res2(y) y = self.res3(y) y = self.res4(y) y = self.res5(y) y = self.relu(self.in4(self.up1(y))) y = self.relu(self.in5(self.up2(y))) return self.up3(y) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 62208 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 32 tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. 
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6_tmp[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 32, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 557568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x2 = xindex // 4356 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 64, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 128 tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0 % 128, None, eviction_policy='evict_last') tmp2 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None) tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = tl.broadcast_to(tmp5, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.full([1], 256, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp4 - tmp12 tmp24 = tmp23 * tmp22 tmp25 = tmp24 * tmp0 tmp26 = tmp25 + tmp1 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tl.store(out_ptr0 + x0, tmp0, None) tl.store(out_ptr1 + x0, tmp1, None) tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp22, None) tl.store(out_ptr3 + (r3 + 256 * x0), tmp28, None) tl.store(out_ptr2 + x0, tmp12, None) @triton.jit def triton_poi_fused_reflection_pad2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_9(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, 
[RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 128, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 128 tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp27 = tl.load(in_out_ptr1 + (r3 + 256 * x0), None) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = tl.broadcast_to(tmp4, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.full([1], 256, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp11 tmp18 = 256.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tmp23 * tmp0 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tl.store(out_ptr0 + x0, tmp0, None) tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp3, None) tl.store(in_out_ptr1 + (r3 + 256 * x0), tmp28, None) tl.store(out_ptr3 + x0, tmp22, None) tl.store(out_ptr1 + x0, tmp11, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None) tl.store(out_ptr2 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp15, None) @triton.jit def triton_poi_fused_arange_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_reflection_pad2d_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 34 % 34 x0 = xindex % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = 256.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp11 * tmp17 tmp20 = tmp18 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr6 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp24 = tmp22 + tmp23 tl.store(out_ptr0 + x7, tmp24, None) @triton.jit def triton_poi_fused_arange_17(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_relu_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 66 % 66 x0 = xindex % 66 x2 = xindex // 4356 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2), xmask, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tl.store(out_ptr0 + x5, tmp19, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, 
primals_59, primals_60, primals_61, primals_62, primals_63 ) = args args.clear() assert_size_stride(primals_1, (32, 3, 9, 9), (243, 81, 9, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64,), (1,)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128,), (1,)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128,), (1,)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128,), (1,)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (128,), (1,)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128,), (1,)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (128,), (1,)) assert_size_stride(primals_40, (128,), (1,)) assert_size_stride(primals_41, (128,), (1,)) assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_43, (128,), (1,)) assert_size_stride(primals_44, (128,), (1,)) assert_size_stride(primals_45, (128,), (1,)) assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_47, (128,), (1,)) assert_size_stride(primals_48, (128,), (1,)) assert_size_stride(primals_49, (128,), (1,)) assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_51, (128,), (1,)) assert_size_stride(primals_52, (128,), (1,)) assert_size_stride(primals_53, (128,), (1,)) assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_55, (64,), (1,)) assert_size_stride(primals_56, (64,), (1,)) assert_size_stride(primals_57, (64,), (1,)) assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_59, (32,), (1,)) assert_size_stride(primals_60, (32,), (1,)) assert_size_stride(primals_61, (32,), (1,)) assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1)) assert_size_stride(primals_63, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(62208)](primals_3, buf0, 62208, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf1 = 
extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32 ) buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch .float32) buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf6 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](buf2 , buf8, primals_2, buf5, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_2 buf3 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_4, buf3, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_4 buf4 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_5, buf4, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_3[grid(557568)](buf2, buf5, buf8, buf3, buf4, buf9, 557568, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf11 = buf10 del buf10 buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. float32) buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf15 triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)]( buf11, buf17, primals_7, buf14, 256, 1024, num_warps=8, num_stages=1) del primals_7 buf12 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_8, buf12, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_8 buf13 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_9, buf13, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_6[grid(295936)](buf11, buf14, buf17, buf12, buf13, buf18, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf21 = empty_strided_cuda((512,), (1,), torch.float32) buf22 = empty_strided_cuda((512,), (1,), torch.float32) buf20 = buf19 del buf19 buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf24 buf27 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7[ grid(512)](buf20, buf26, primals_12, primals_13, primals_11, buf21, buf22, buf23, buf27, 512, 256, num_warps=2, num_stages=1) del primals_11 del primals_12 del primals_13 buf28 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf27, buf28, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 16, 16), (32768, 256, 16, 1)) buf30 = buf29 del buf29 buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. float32) buf34 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf36 = reinterpret_tensor(buf34, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf34 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf30, buf36, primals_15, buf33, 512, 256, num_warps=2, num_stages=1) del primals_15 buf31 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_16, buf31, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_16 buf32 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_17, buf32, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf30, buf33, buf36, buf31, buf32, buf37, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1)) buf40 = empty_strided_cuda((512,), (1,), torch.float32) buf39 = buf38 del buf38 buf41 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf45 = buf27 del buf27 buf44 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf39, buf45, primals_20, primals_19, primals_21, buf40, buf41, buf44, 512, 256, num_warps=2, num_stages=1) del primals_19 del primals_20 del primals_21 buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf45, buf46, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1)) buf48 = buf47 del buf47 buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf52 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf48, buf54, primals_23, buf51, 512, 256, num_warps=2, num_stages=1) del primals_23 buf49 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_24, buf49, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_24 buf50 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_25, buf50, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf55 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf48, buf51, buf54, buf49, buf50, buf55, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf55, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 128, 16, 16), (32768, 256, 16, 1)) buf58 = empty_strided_cuda((512,), (1,), torch.float32) buf57 = buf56 del buf56 buf59 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf63 = buf45 del buf45 buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf57, buf63, primals_28, primals_27, primals_29, buf58, buf59, buf62, 512, 256, num_warps=2, num_stages=1) del primals_27 del primals_28 del primals_29 buf64 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf63, buf64, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf65 = extern_kernels.convolution(buf64, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 128, 16, 16), (32768, 256, 16, 1)) buf66 = buf65 del buf65 buf69 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf72 = reinterpret_tensor(buf70, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf70 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf66, buf72, primals_31, buf69, 512, 256, num_warps=2, num_stages=1) del primals_31 buf67 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_32, buf67, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_32 buf68 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_33, buf68, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_33 buf73 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf66, buf69, buf72, buf67, buf68, buf73, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf73, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf74, (4, 128, 16, 16), (32768, 256, 16, 1)) buf76 = empty_strided_cuda((512,), (1,), torch.float32) buf75 = buf74 del buf74 buf77 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf81 = buf63 del buf63 buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf75, buf81, primals_36, primals_35, primals_37, buf76, buf77, buf80, 512, 256, num_warps=2, num_stages=1) del primals_35 del primals_36 del primals_37 buf82 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf81, buf82, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf83 = extern_kernels.convolution(buf82, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = buf83 del buf83 buf87 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf88 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf90 = reinterpret_tensor(buf88, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf88 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf84, buf90, primals_39, buf87, 512, 256, num_warps=2, num_stages=1) del primals_39 buf85 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_40, buf85, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_40 buf86 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_41, buf86, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_41 buf91 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf84, buf87, buf90, buf85, buf86, buf91, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf92 = extern_kernels.convolution(buf91, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf92, (4, 128, 16, 16), (32768, 256, 16, 1)) buf94 = empty_strided_cuda((512,), (1,), torch.float32) buf93 = buf92 del buf92 buf95 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf99 = buf81 del buf81 buf98 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[ grid(512)](buf93, buf99, primals_44, primals_43, primals_45, buf94, buf95, buf98, 512, 256, num_warps=2, num_stages=1) del primals_43 del primals_44 del primals_45 buf100 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_8[grid(165888)](buf99, buf100, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf101 = extern_kernels.convolution(buf100, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf101, (4, 128, 16, 16), (32768, 256, 16, 1)) buf102 = buf101 del buf101 buf105 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch. 
float32) buf106 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf108 = reinterpret_tensor(buf106, (1, 512, 1, 1), (512, 1, 1, 1), 0) del buf106 triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)]( buf102, buf108, primals_47, buf105, 512, 256, num_warps=2, num_stages=1) del primals_47 buf103 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_48, buf103, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_48 buf104 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_49, buf104, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_49 buf109 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf102, buf105, buf108, buf103, buf104, buf109, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf110 = extern_kernels.convolution(buf109, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf110, (4, 128, 16, 16), (32768, 256, 16, 1)) buf111 = buf110 del buf110 buf113 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf114 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) buf116 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_13[grid(512)]( buf111, primals_51, buf113, buf114, buf116, 512, 256, num_warps =2, num_stages=1) del primals_51 buf112 = empty_strided_cuda((512,), (1,), torch.float32) triton_poi_fused_repeat_10[grid(512)](primals_52, buf112, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_52 buf117 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_arange_14[grid(32)](buf117, 32, XBLOCK=32, num_warps=1, num_stages=1) buf118 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_15[grid(32)](buf118, 32, XBLOCK=32, num_warps=1, num_stages=1) buf119 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused__unsafe_index_add_reflection_pad2d_16[grid(591872)]( buf118, buf111, buf113, buf114, buf112, primals_53, buf99, buf119, 591872, XBLOCK=512, num_warps=8, num_stages=1) del buf114 del buf99 del primals_53 buf120 = extern_kernels.convolution(buf119, primals_54, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf121 = buf120 del buf120 buf124 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. 
float32) buf125 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf127 = reinterpret_tensor(buf125, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf125 triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)]( buf121, buf127, primals_55, buf124, 256, 1024, num_warps=8, num_stages=1) del primals_55 buf122 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_56, buf122, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_56 buf123 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_5[grid(256)](primals_57, buf123, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_57 buf128 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_17[grid(64)](buf128, 64, XBLOCK=64, num_warps=1, num_stages=1) buf129 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_18[grid(64)](buf129, 64, XBLOCK=64, num_warps=1, num_stages=1) buf130 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_reflection_pad2d_relu_19[grid(1115136)]( buf129, buf121, buf124, buf127, buf122, buf123, buf130, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) buf131 = extern_kernels.convolution(buf130, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf132 = buf131 del buf131 buf135 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch. float32) buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32) buf138 = reinterpret_tensor(buf136, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf136 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)]( buf132, buf138, primals_59, buf135, 128, 4096, XBLOCK=1, RBLOCK =2048, num_warps=16, num_stages=1) del primals_59 buf133 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_60, buf133, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_60 buf134 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_61, buf134, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_61 buf139 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_20[grid(663552)](buf132, buf135, buf138, buf133, buf134, buf139, 663552, XBLOCK=1024, num_warps=4, num_stages=1) buf140 = extern_kernels.convolution(buf139, primals_62, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf140, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf141 = buf140 del buf140 triton_poi_fused_convolution_21[grid(49152)](buf141, primals_63, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_63 return (buf141, primals_1, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, primals_34, primals_38, primals_42, primals_46, primals_50, primals_54, primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22, buf23, buf26, buf28, buf30, buf31, buf32, buf33, buf36, buf37, buf39, buf40, reinterpret_tensor(buf44, (512,), (1,), 0), buf46, buf48, buf49, buf50, buf51, buf54, buf55, buf57, buf58, reinterpret_tensor(buf62, (512,), (1,), 0), buf64, buf66, buf67, buf68, buf69, buf72, buf73, buf75, buf76, reinterpret_tensor(buf80, (512,), (1,), 0), buf82, buf84, buf85, buf86, 
buf87, buf90, buf91, buf93, buf94, reinterpret_tensor(buf98, (512,), (1,), 0), buf100, buf102, buf103, buf104, buf105, buf108, buf109, buf111, buf112, reinterpret_tensor(buf116, (512,), (1,), 0), buf117, buf118, buf119, buf121, buf122, buf123, buf124, buf127, buf128, buf129, buf130, buf132, buf133, buf134, buf135, buf138, buf139, reinterpret_tensor( buf113, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor( buf95, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf77, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf59, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf41, (1, 512, 1, 1), (512, 1, 1, 1), 0)) class ConvLayer(torch.nn.Module): """ A small wrapper around nn.Conv2d, so as to make the code cleaner and allow for experimentation with padding """ def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=kernel_size // 2, padding_mode= 'reflect') def forward(self, x): return self.conv2d(x) class ResidualBlock(torch.nn.Module): """ Originally introduced in (Microsoft Research Asia, He et al.): https://arxiv.org/abs/1512.03385 Modified architecture according to suggestions in this blog: http://torch.ch/blog/2016/02/04/resnets.html The only difference from the original is: There is no ReLU layer after the addition of identity and residual """ def __init__(self, channels): super(ResidualBlock, self).__init__() kernel_size = 3 stride_size = 1 self.conv1 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=stride_size) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=stride_size) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) return out + residual class UpsampleConvLayer(torch.nn.Module): """ Nearest-neighbor up-sampling followed by a convolution Appears to give better results than learned up-sampling aka transposed conv (avoids the checkerboard artifact) Initially proposed on distill pub: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() self.upsampling_factor = stride self.conv2d = ConvLayer(in_channels, out_channels, kernel_size, stride=1) def forward(self, x): if self.upsampling_factor > 1: x = torch.nn.functional.interpolate(x, scale_factor=self. 
upsampling_factor, mode='nearest') return self.conv2d(x) class TransformerNetNew(torch.nn.Module): def __init__(self): super().__init__() self.relu = torch.nn.ReLU() num_of_channels = [3, 32, 64, 128] kernel_sizes = [9, 3, 3] stride_sizes = [1, 2, 2] self.conv1 = ConvLayer(num_of_channels[0], num_of_channels[1], kernel_size=kernel_sizes[0], stride=stride_sizes[0]) self.in1 = torch.nn.InstanceNorm2d(num_of_channels[1], affine=True) self.conv2 = ConvLayer(num_of_channels[1], num_of_channels[2], kernel_size=kernel_sizes[1], stride=stride_sizes[1]) self.in2 = torch.nn.InstanceNorm2d(num_of_channels[2], affine=True) self.conv3 = ConvLayer(num_of_channels[2], num_of_channels[3], kernel_size=kernel_sizes[2], stride=stride_sizes[2]) self.in3 = torch.nn.InstanceNorm2d(num_of_channels[3], affine=True) res_block_num_of_filters = 128 self.res1 = ResidualBlock(res_block_num_of_filters) self.res2 = ResidualBlock(res_block_num_of_filters) self.res3 = ResidualBlock(res_block_num_of_filters) self.res4 = ResidualBlock(res_block_num_of_filters) self.res5 = ResidualBlock(res_block_num_of_filters) num_of_channels.reverse() kernel_sizes.reverse() stride_sizes.reverse() self.up1 = UpsampleConvLayer(num_of_channels[0], num_of_channels[1], kernel_size=kernel_sizes[0], stride=stride_sizes[0]) self.in4 = torch.nn.InstanceNorm2d(num_of_channels[1], affine=True) self.up2 = UpsampleConvLayer(num_of_channels[1], num_of_channels[2], kernel_size=kernel_sizes[1], stride=stride_sizes[1]) self.in5 = torch.nn.InstanceNorm2d(num_of_channels[2], affine=True) self.up3 = ConvLayer(num_of_channels[2], num_of_channels[3], kernel_size=kernel_sizes[2], stride=stride_sizes[2]) def forward(self, input_0): primals_1 = self.conv1.conv2d.weight primals_2 = self.conv1.conv2d.bias primals_4 = self.in1.weight primals_5 = self.in1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.in2.weight primals_9 = self.in2.bias primals_10 = self.conv3.conv2d.weight primals_11 = self.conv3.conv2d.bias primals_12 = self.in3.weight primals_13 = self.in3.bias primals_14 = self.res1.conv1.conv2d.weight primals_15 = self.res1.conv1.conv2d.bias primals_16 = self.res1.in1.weight primals_17 = self.res1.in1.bias primals_18 = self.res1.conv2.conv2d.weight primals_19 = self.res1.conv2.conv2d.bias primals_20 = self.res1.in2.weight primals_21 = self.res1.in2.bias primals_22 = self.res2.conv1.conv2d.weight primals_23 = self.res2.conv1.conv2d.bias primals_24 = self.res2.in1.weight primals_25 = self.res2.in1.bias primals_26 = self.res2.conv2.conv2d.weight primals_27 = self.res2.conv2.conv2d.bias primals_28 = self.res2.in2.weight primals_29 = self.res2.in2.bias primals_30 = self.res3.conv1.conv2d.weight primals_31 = self.res3.conv1.conv2d.bias primals_32 = self.res3.in1.weight primals_33 = self.res3.in1.bias primals_34 = self.res3.conv2.conv2d.weight primals_35 = self.res3.conv2.conv2d.bias primals_36 = self.res3.in2.weight primals_37 = self.res3.in2.bias primals_38 = self.res4.conv1.conv2d.weight primals_39 = self.res4.conv1.conv2d.bias primals_40 = self.res4.in1.weight primals_41 = self.res4.in1.bias primals_42 = self.res4.conv2.conv2d.weight primals_43 = self.res4.conv2.conv2d.bias primals_44 = self.res4.in2.weight primals_45 = self.res4.in2.bias primals_46 = self.res5.conv1.conv2d.weight primals_47 = self.res5.conv1.conv2d.bias primals_48 = self.res5.in1.weight primals_49 = self.res5.in1.bias primals_50 = self.res5.conv2.conv2d.weight primals_51 = self.res5.conv2.conv2d.bias primals_52 = self.res5.in2.weight primals_53 = 
self.res5.in2.bias primals_54 = self.up1.conv2d.conv2d.weight primals_55 = self.up1.conv2d.conv2d.bias primals_56 = self.in4.weight primals_57 = self.in4.bias primals_58 = self.up2.conv2d.conv2d.weight primals_59 = self.up2.conv2d.conv2d.bias primals_60 = self.in5.weight primals_61 = self.in5.bias primals_62 = self.up3.conv2d.weight primals_63 = self.up3.conv2d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63]) return output[0]
gordicaleksa/pytorch-nst-feedforward
TransformerNet
false
15511
[ "MIT" ]
50
00c96e8e3f1b0b7fb4c14254fd0c6f1281a29598
https://github.com/gordicaleksa/pytorch-nst-feedforward/tree/00c96e8e3f1b0b7fb4c14254fd0c6f1281a29598
TripletLoss
import torch import torch.nn as nn class TripletLoss(nn.Module): """Triplet loss for metric learning """ def __init__(self, margin=1.0, p=2, loss_weight=1.0, reduction='mean'): """ Initialization. Args: margin(float): a margin distance between for anchor-positive and anchor-negative p(int): Denominator value, \\sum{x^p}+\\sum{y^p}, default:2 loss_weight(float): loss weight """ super().__init__() self.margin = margin self.p = p self.loss_weight = loss_weight self.reduction = reduction self.loss = nn.TripletMarginLoss(margin=self.margin, p=self.p, reduction=self.reduction) def forward(self, anchor, positive, negative): """ Multiply loss with loss_weight. Args: anchor(Tensor): a tensor of shape [N, C, H, W] positive(Tensor): a tensor of shape same with anchor negative(Tensor): a tensor of shape same with anchor Returns: Tensor: loss tensor """ loss = self.loss_weight * self.loss(anchor, positive, negative) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_min_mean_mul_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1.0 tmp26 = tmp24 + tmp25 tmp28 = tmp0 - tmp27 tmp29 = tmp28 + tmp3 tmp30 = tmp29 * tmp29 tmp32 = tmp6 - tmp31 tmp33 = tmp32 + tmp3 tmp34 = tmp33 * tmp33 tmp35 = tmp30 + tmp34 tmp37 = tmp12 - tmp36 tmp38 = tmp37 + tmp3 tmp39 = tmp38 * tmp38 tmp40 = tmp35 + tmp39 tmp42 = tmp18 - tmp41 tmp43 = tmp42 + tmp3 tmp44 = tmp43 * tmp43 tmp45 = tmp40 + tmp44 tmp46 = libdevice.sqrt(tmp45) tmp47 = tmp26 - tmp46 tmp48 = 0.0 tmp49 = triton_helpers.maximum(tmp47, tmp48) tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.sum(tmp50, 1)[:, None] tmp53 = 64.0 tmp54 = tmp52 / tmp53 tmp55 = tmp54 * tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp55, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_clamp_min_mean_mul_norm_sub_0[grid(1)](buf2, arg2_1, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class TripletLossNew(nn.Module): """Triplet loss for metric learning """ def __init__(self, margin=1.0, p=2, loss_weight=1.0, reduction='mean'): """ Initialization. 
Args: margin(float): the margin between the anchor-positive and anchor-negative distances p(int): the norm degree for pairwise distance, default: 2 loss_weight(float): loss weight """ super().__init__() self.margin = margin self.p = p self.loss_weight = loss_weight self.reduction = reduction self.loss = nn.TripletMarginLoss(margin=self.margin, p=self.p, reduction=self.reduction) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
hikopensource/DAVAR-Lab-OCR
TripletLoss
false
15,512
[ "Apache-2.0" ]
387
c65285f6668864cca7a12770ae4c8d083ea1cf1b
https://github.com/hikopensource/DAVAR-Lab-OCR/tree/c65285f6668864cca7a12770ae4c8d083ea1cf1b
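For reference, a minimal sanity-check sketch for the TripletLoss record above. It is hypothetical, not part of the source repo: it assumes the TripletLoss/TripletLossNew classes and get_inputs from this record are importable, that a CUDA device is present (Inductor's generated Triton kernels are CUDA-only), and the tolerance is an arbitrary choice.

# Hypothetical harness: compare the eager module against the fused Triton variant.
import torch

anchor, positive, negative = [t.cuda() for t in get_inputs()]
eager = TripletLoss()     # wraps nn.TripletMarginLoss
fused = TripletLossNew()  # one fused sub/add-eps/square/sum/sqrt/clamp/mean kernel
ref = eager(anchor, positive, negative)
out = fused(anchor, positive, negative)
# The fused reduction reorders floating-point ops, so allow a small
# tolerance rather than requiring bitwise equality.
assert torch.allclose(ref, out, atol=1e-5)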
TSAFusion
import torch import torch.nn as nn class TSAFusion(nn.Module): """Temporal Spatial Attention (TSA) fusion module. Temporal: Calculate the correlation between center frame and neighboring frames; Spatial: It has 3 pyramid levels, the attention is similar to SFT. (SFT: Recovering realistic texture in image super-resolution by deep spatial feature transform.) Args: num_feat (int): Channel number of middle features. Default: 64. num_frame (int): Number of frames. Default: 5. center_frame_idx (int): The index of center frame. Default: 2. """ def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2): super(TSAFusion, self).__init__() self.center_frame_idx = center_frame_idx self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1) self.max_pool = nn.MaxPool2d(3, stride=2, padding=1) self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1) self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1) self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1) self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1) self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1) self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, aligned_feat): """ Args: aligned_feat (Tensor): Aligned features with shape (b, t, c, h, w). Returns: Tensor: Features after TSA with the shape (b, c, h, w). """ b, t, c, h, w = aligned_feat.size() embedding_ref = self.temporal_attn1(aligned_feat[:, self. center_frame_idx, :, :, :].clone()) embedding = self.temporal_attn2(aligned_feat.view(-1, c, h, w)) embedding = embedding.view(b, t, -1, h, w) corr_l = [] for i in range(t): emb_neighbor = embedding[:, i, :, :, :] corr = torch.sum(emb_neighbor * embedding_ref, 1) corr_l.append(corr.unsqueeze(1)) corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1)) corr_prob = corr_prob.unsqueeze(2).expand(b, t, c, h, w) corr_prob = corr_prob.contiguous().view(b, -1, h, w) aligned_feat = aligned_feat.view(b, -1, h, w) * corr_prob feat = self.lrelu(self.feat_fusion(aligned_feat)) attn = self.lrelu(self.spatial_attn1(aligned_feat)) attn_max = self.max_pool(attn) attn_avg = self.avg_pool(attn) attn = self.lrelu(self.spatial_attn2(torch.cat([attn_max, attn_avg], dim=1))) attn_level = self.lrelu(self.spatial_attn_l1(attn)) attn_max = self.max_pool(attn_level) attn_avg = self.avg_pool(attn_level) attn_level = self.lrelu(self.spatial_attn_l2(torch.cat([attn_max, attn_avg], dim=1))) attn_level = self.lrelu(self.spatial_attn_l3(attn_level)) attn_level = self.upsample(attn_level) attn = self.lrelu(self.spatial_attn3(attn)) + attn_level attn = self.lrelu(self.spatial_attn4(attn)) attn = self.upsample(attn) attn = self.spatial_attn5(attn) attn_add = self.spatial_attn_add2(self.lrelu(self.spatial_attn_add1 (attn))) attn = torch.sigmoid(attn) feat = feat * attn * 2 + attn_add return feat def get_inputs(): return [torch.rand([4, 5, 64, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 1024 x1 = xindex // 1024 x2 = xindex tmp0 = tl.load(in_ptr0 + (2048 + x0 + 5120 * x1), None) tl.store(out_ptr0 + x2, tmp0, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_per_fused_cat_mul_sum_3(in_ptr0, in_ptr1, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 5120 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp7 = tl.load(in_ptr0 + (1024 + x0 + 16 * r2 + 5120 * x1), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (2048 + x0 + 16 * r2 + 5120 * x1), xmask, other=0.0) tmp19 = tl.load(in_ptr0 + (3072 + x0 + 16 * r2 + 5120 * x1), xmask, other=0.0) tmp25 = tl.load(in_ptr0 + (4096 + x0 + 16 * r2 + 5120 * x1), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp7 * tmp1 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp14 = tmp13 * tmp1 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp20 = tmp19 * tmp1 tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK]) tmp23 = tl.where(xmask, tmp21, 0) tmp24 = tl.sum(tmp23, 1)[:, None] tmp26 = tmp25 * tmp1 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.where(xmask, tmp27, 0) tmp30 = tl.sum(tmp29, 1)[:, None] tl.store(out_ptr5 + (x0 + 80 * x1), tmp6, xmask) tl.store(out_ptr6 + (x0 + 80 * x1), tmp12, xmask) tl.store(out_ptr7 + (x0 + 80 * x1), tmp18, xmask) tl.store(out_ptr8 + (x0 + 80 * x1), tmp24, xmask) tl.store(out_ptr9 + (x0 + 80 * x1), tmp30, xmask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, 
xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 320 x2 = xindex // 5120 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 16 * (x1 // 64) + 80 * x2), None) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x5 = xindex // 2 x3 = xindex // 256 x6 = xindex % 256 x7 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 * x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + 2 * x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 
tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp78 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp79 = tmp78 + tmp77 tmp80 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp81 = tmp80 + tmp79 tmp82 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tmp82 + tmp81 tmp84 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tmp84 + tmp83 tmp86 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask, eviction_policy='evict_last', other=0.0) tmp87 = tmp86 + tmp85 tmp88 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tmp88 + tmp87 tmp90 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp91 = tmp90 + tmp89 tmp92 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask, eviction_policy='evict_last', other=0.0) tmp93 = tmp92 + tmp91 tmp94 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5) ) tmp95 = tmp93 / tmp94 tl.store(out_ptr0 + (x6 + 512 * x3), tmp51, xmask) tl.store(out_ptr1 + x7, tmp76, xmask) tl.store(out_ptr2 + (x6 + 512 * x3), tmp95, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.full([1], -1, tl.int64) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tmp5 & tmp5 tmp7 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=float('-inf')) tmp8 = tmp1 >= tmp1 tmp9 = tmp1 < tmp3 tmp10 = tmp8 & tmp9 tmp11 = tmp5 & tmp10 tmp12 = tl.load(in_ptr0 + (-2 
+ 4 * x2), tmp11 & xmask, eviction_policy ='evict_last', other=float('-inf')) tmp13 = triton_helpers.maximum(tmp12, tmp7) tmp14 = tl.full([1], 1, tl.int64) tmp15 = tmp14 >= tmp1 tmp16 = tmp14 < tmp3 tmp17 = tmp15 & tmp16 tmp18 = tmp5 & tmp17 tmp19 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy ='evict_last', other=float('-inf')) tmp20 = triton_helpers.maximum(tmp19, tmp13) tmp21 = tmp10 & tmp5 tmp22 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy ='evict_last', other=float('-inf')) tmp23 = triton_helpers.maximum(tmp22, tmp20) tmp24 = tmp10 & tmp10 tmp25 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy= 'evict_last', other=float('-inf')) tmp26 = triton_helpers.maximum(tmp25, tmp23) tmp27 = tmp10 & tmp17 tmp28 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy= 'evict_last', other=float('-inf')) tmp29 = triton_helpers.maximum(tmp28, tmp26) tmp30 = tmp17 & tmp5 tmp31 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy= 'evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp29) tmp33 = tmp17 & tmp10 tmp34 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy= 'evict_last', other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp17 & tmp17 tmp37 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy= 'evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = tmp12 > tmp7 tmp40 = tl.full([1], 1, tl.int8) tmp41 = tl.full([1], 0, tl.int8) tmp42 = tl.where(tmp39, tmp40, tmp41) tmp43 = tmp19 > tmp13 tmp44 = tl.full([1], 2, tl.int8) tmp45 = tl.where(tmp43, tmp44, tmp42) tmp46 = tmp22 > tmp20 tmp47 = tl.full([1], 3, tl.int8) tmp48 = tl.where(tmp46, tmp47, tmp45) tmp49 = tmp25 > tmp23 tmp50 = tl.full([1], 4, tl.int8) tmp51 = tl.where(tmp49, tmp50, tmp48) tmp52 = tmp28 > tmp26 tmp53 = tl.full([1], 5, tl.int8) tmp54 = tl.where(tmp52, tmp53, tmp51) tmp55 = tmp31 > tmp29 tmp56 = tl.full([1], 6, tl.int8) tmp57 = tl.where(tmp55, tmp56, tmp54) tmp58 = tmp34 > tmp32 tmp59 = tl.full([1], 7, tl.int8) tmp60 = tl.where(tmp58, tmp59, tmp57) tmp61 = tmp37 > tmp35 tmp62 = tl.full([1], 8, tl.int8) tmp63 = tl.where(tmp61, tmp62, tmp60) tmp64 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp65 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy ='evict_last', other=0.0) tmp66 = tmp65 + tmp64 tmp67 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy ='evict_last', other=0.0) tmp68 = tmp67 + tmp66 tmp69 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy ='evict_last', other=0.0) tmp70 = tmp69 + tmp68 tmp71 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp72 = tmp71 + tmp70 tmp73 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp74 = tmp73 + tmp72 tmp75 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp76 = tmp75 + tmp74 tmp77 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy= 'evict_last', other=0.0) tmp78 = tmp77 + tmp76 tmp79 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy= 'evict_last', other=0.0) tmp80 = tmp79 + tmp78 tmp81 = tl.full([1], 9, tl.int32) tmp82 = tmp80 / tmp81 tl.store(out_ptr0 + (x0 + 128 * x1), tmp38, xmask) tl.store(out_ptr1 + x2, tmp63, xmask) tl.store(out_ptr2 + (x0 + 128 * x1), tmp82, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 0, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x5 = xindex // 4 x2 = xindex // 4 % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x6, xmask) tmp26 = tl.load(in_ptr7 + x2, xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 1, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tl.where(tmp7, tmp6, tmp5) tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.1 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tl.where(tmp19, tmp18, tmp17) tmp21 = tmp16 - tmp16 tmp23 = tmp21 * tmp22 tmp24 = tmp16 + tmp23 tmp27 = tmp25 
+ tmp26 tmp28 = tmp27 > tmp12 tmp29 = tmp27 * tmp14 tmp30 = tl.where(tmp28, tmp27, tmp29) tmp32 = tmp31 + tmp1 tmp33 = tmp31 < 0 tl.where(tmp33, tmp32, tmp31) tmp35 = tmp24 - tmp24 tmp37 = tmp35 * tmp36 tmp38 = tmp24 + tmp37 tmp39 = tmp30 + tmp38 tmp40 = tmp30 > tmp12 tl.store(in_out_ptr0 + x6, tmp39, xmask) tl.store(out_ptr0 + x6, tmp40, xmask) @triton.jit def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = triton_helpers.minimum(tmp10, tmp9) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 4 x0 = xindex % 4 x6 = xindex // 16 x2 = xindex // 16 % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.1 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr2 + (tmp20 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp10 tmp23 = tmp22 > tmp12 tmp24 = tmp22 * tmp14 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tmp25 - tmp16 tmp28 = tmp26 * tmp27 tmp29 = tmp16 + tmp28 tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 
tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp8 + 2 * tmp33 + 4 * x6), None, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tmp35 > tmp12 tmp37 = tmp35 * tmp14 tmp38 = tl.where(tmp36, tmp35, tmp37) tmp39 = tl.load(in_ptr2 + (tmp20 + 2 * tmp33 + 4 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp10 tmp41 = tmp40 > tmp12 tmp42 = tmp40 * tmp14 tmp43 = tl.where(tmp41, tmp40, tmp42) tmp44 = tmp43 - tmp38 tmp45 = tmp44 * tmp27 tmp46 = tmp38 + tmp45 tmp47 = tmp46 - tmp29 tmp49 = tmp47 * tmp48 tmp50 = tmp29 + tmp49 tl.store(in_out_ptr0 + x4, tmp50, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x3, None) tmp13 = tl.load(in_out_ptr1 + x3, None) tmp14 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tl.sigmoid(tmp8) tmp10 = tmp7 * tmp9 tmp11 = 2.0 tmp12 = tmp10 * tmp11 tmp15 = tmp13 + tmp14 tmp16 = tmp12 + tmp15 tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (4, 5, 64, 4, 4), (5120, 1024, 16, 4, 1)) assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 320, 1, 1), (320, 1, 1, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 320, 1, 1), (320, 1, 1, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_11, (64,), (1,)) 
assert_size_stride(primals_12, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_21, (64,), (1,)) assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_25, (64,), (1,)) assert_size_stride(primals_26, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_27, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4096)](primals_1, buf0, 4096, XBLOCK= 128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(4096)](buf2, primals_3, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (20, 64, 4, 4), (1024, 16, 4, 1), 0), primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (20, 64, 4, 4), (1024, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(20480)](buf4, primals_5, 20480, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf10 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0) buf11 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 16) buf12 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 32) buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 48) buf14 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 64) triton_per_fused_cat_mul_sum_3[grid(64)](buf4, buf2, buf10, buf11, buf12, buf13, buf14, 64, 64, XBLOCK=32, num_warps=8, num_stages=1) buf16 = empty_strided_cuda((4, 320, 4, 4), (5120, 16, 4, 1), torch. float32) triton_poi_fused_mul_4[grid(20480)](primals_1, buf15, buf16, 20480, XBLOCK=256, num_warps=4, num_stages=1) buf17 = extern_kernels.convolution(buf16, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 4, 4), (1024, 16, 4, 1)) buf19 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 64, 4, 4), (1024, 16, 4, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf20, primals_9, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 4, 2, 1), torch. 
float32) buf21 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 0) buf22 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.int8) buf23 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 256) triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6[grid(1024)](buf20 , buf21, buf22, buf23, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf25 = extern_kernels.convolution(buf24, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 64, 2, 2), (256, 4, 2, 1)) buf26 = buf25 del buf25 triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf26, primals_11, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 64, 2, 2), (256, 4, 2, 1)) buf28 = buf27 del buf27 triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf28, primals_13, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch. float32) buf29 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 0) buf30 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8) buf31 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 64) triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8[grid(256)](buf28, buf29, buf30, buf31, 256, XBLOCK=128, num_warps=4, num_stages=1) buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 1, 1)) buf34 = buf33 del buf33 triton_poi_fused_convolution_leaky_relu_9[grid(256)](buf34, primals_15, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 1, 1)) buf36 = empty_strided_cuda((2, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_10[grid(2)](buf36, 2, XBLOCK=2, num_warps =1, num_stages=1) buf37 = empty_strided_cuda((2, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_11[grid(2)](buf37, 2, XBLOCK=2, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused__to_copy_10[grid(2)](buf38, 2, XBLOCK=2, num_warps =1, num_stages=1) buf39 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused_add_clamp_11[grid(2)](buf39, 2, XBLOCK=2, num_warps=1, num_stages=1) buf40 = empty_strided_cuda((2,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf40, 2, XBLOCK=2, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((2, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf42, 2, XBLOCK=2, num_warps=1, num_stages=1) buf43 = extern_kernels.convolution(buf26, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 64, 2, 2), (256, 4, 2, 1)) buf41 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.float32 ) buf44 = buf41 del buf41 buf62 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool) triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13[ grid(1024)](buf44, buf36, buf38, buf35, primals_17, 
buf39, buf40, buf43, primals_19, buf37, buf42, buf62, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf43 del primals_19 buf45 = extern_kernels.convolution(buf44, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 64, 2, 2), (256, 4, 2, 1)) buf46 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_14[grid(4)](buf46, 4, XBLOCK=4, num_warps =1, num_stages=1) buf47 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_15[grid(4)](buf47, 4, XBLOCK=4, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_14[grid(4)](buf48, 4, XBLOCK=4, num_warps =1, num_stages=1) buf49 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_15[grid(4)](buf49, 4, XBLOCK=4, num_warps=1, num_stages=1) buf50 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf50, 4, XBLOCK=4, num_warps=1, num_stages=1) buf52 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf52, 4, XBLOCK=4, num_warps=1, num_stages=1) buf53 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) buf54 = buf53 del buf53 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17[ grid(4096)](buf54, buf46, buf48, buf45, primals_21, buf49, buf50, buf47, buf52, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf55 = extern_kernels.convolution(buf54, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 64, 4, 4), (1024, 16, 4, 1)) buf56 = buf55 del buf55 triton_poi_fused_convolution_1[grid(4096)](buf56, primals_23, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_23 buf57 = extern_kernels.convolution(buf56, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 64, 4, 4), (1024, 16, 4, 1)) buf58 = buf57 del buf57 triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf58, primals_25, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_25 buf59 = extern_kernels.convolution(buf58, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 64, 4, 4), (1024, 16, 4, 1)) buf18 = buf17 del buf17 buf60 = buf59 del buf59 triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18[grid(4096)]( buf18, buf60, primals_7, buf56, primals_27, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_27 del primals_7 buf61 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19[grid (1024)](buf45, primals_21, buf61, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf45 del primals_21 buf63 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20[grid (256)](buf35, primals_17, buf63, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf35 del primals_17 return (buf60, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf0, buf2, reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 1024), 
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 2048), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 3072), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 4096), buf15, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf37, buf38, buf39, buf40, buf42, buf44, buf46, buf47, buf48, buf49, buf50, buf52, buf54, buf56, buf58, buf61, buf62, buf63) class TSAFusionNew(nn.Module): """Temporal Spatial Attention (TSA) fusion module. Temporal: Calculate the correlation between center frame and neighboring frames; Spatial: It has 3 pyramid levels, the attention is similar to SFT. (SFT: Recovering realistic texture in image super-resolution by deep spatial feature transform.) Args: num_feat (int): Channel number of middle features. Default: 64. num_frame (int): Number of frames. Default: 5. center_frame_idx (int): The index of center frame. Default: 2. """ def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2): super(TSAFusionNew, self).__init__() self.center_frame_idx = center_frame_idx self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1) self.max_pool = nn.MaxPool2d(3, stride=2, padding=1) self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1) self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1) self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1) self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1) self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1) self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, input_0): primals_2 = self.temporal_attn1.weight primals_3 = self.temporal_attn1.bias primals_4 = self.temporal_attn2.weight primals_5 = self.temporal_attn2.bias primals_6 = self.feat_fusion.weight primals_7 = self.feat_fusion.bias primals_8 = self.spatial_attn1.weight primals_9 = self.spatial_attn1.bias primals_10 = self.spatial_attn2.weight primals_11 = self.spatial_attn2.bias primals_16 = self.spatial_attn3.weight primals_13 = self.spatial_attn3.bias primals_12 = self.spatial_attn4.weight primals_15 = self.spatial_attn4.bias primals_18 = self.spatial_attn5.weight primals_17 = self.spatial_attn5.bias primals_20 = self.spatial_attn_l1.weight primals_19 = self.spatial_attn_l1.bias primals_14 = self.spatial_attn_l2.weight primals_21 = self.spatial_attn_l2.bias primals_22 = self.spatial_attn_l3.weight primals_23 = self.spatial_attn_l3.bias primals_24 = self.spatial_attn_add1.weight primals_25 = self.spatial_attn_add1.bias primals_26 = self.spatial_attn_add2.weight primals_27 = self.spatial_attn_add2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
grofit/traiNNer
TSAFusion
false
15,513
[ "Apache-2.0" ]
78
12d006fd44ed304e4178839c53b1f3d95ca25dcb
https://github.com/grofit/traiNNer/tree/12d006fd44ed304e4178839c53b1f3d95ca25dcb
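A hypothetical smoke test for the TSAFusion record above (assumptions: the two classes and get_inputs from this record are importable, and a CUDA device is available). Because both modules are randomly initialized, the sketch copies the eager weights into the compiled variant before comparing.

# Hypothetical harness; TSAFusion, TSAFusionNew and get_inputs come from the record above.
import torch

x = get_inputs()[0].cuda()                 # (b=4, t=5, c=64, h=4, w=4)
eager = TSAFusion().cuda()
fused = TSAFusionNew().cuda()
fused.load_state_dict(eager.state_dict())  # share the conv weights
with torch.no_grad():
    ref = eager(x)
    out = fused(x)
print(ref.shape)                           # torch.Size([4, 64, 4, 4])
print((ref - out).abs().max().item())      # expected to be ~1e-6 or below

Note that call() numbers its primals by FX graph order rather than by module attribute order, which is why the weight-to-primals mapping in TSAFusionNew.forward looks shuffled; the assert_size_stride checks inside call() pin each primal to the matching weight shape.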
CReLU
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class CReLU(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(2 * nchannels) self.relu = nn.ReLU(inplace=True) self.in_channels = nchannels self.out_channels = 2 * nchannels def forward(self, x): x1 = torch.cat((x, -x), 1) x2 = self.scale(x1) y = self.relu(x2) return y def __repr__(self): s = '{name} ({in_channels}, {out_channels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_cat_mul_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tmp20 = 0.0 tmp21 = tmp19 <= tmp20 tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp19, xmask) tl.store(out_ptr2 + x3, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_3, (1, 8, 1, 1), (8, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_cat_mul_relu_threshold_backward_0[grid(512)]( primals_1, primals_2, primals_3, buf0, buf1, buf2, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 return buf1, buf0, buf2 class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class CReLUNew(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(2 * nchannels) self.relu = nn.ReLU(inplace=True) self.in_channels = nchannels self.out_channels = 2 * nchannels def __repr__(self): s = '{name} ({in_channels}, {out_channels})' return s.format(name=self.__class__.__name__, **self.__dict__) def forward(self, input_0): primals_2 = self.scale.weight primals_3 = self.scale.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hilman-dayo/ObjectDetection-OneStageDet
CReLU
false
15,514
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
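A small illustrative check for the CReLU record above (a hypothetical sketch, CUDA assumed; the input shape mirrors get_inputs()). With the default Scale initialization (weight=1, bias=0), CReLU reduces to relu(cat([x, -x], dim=1)), which is exactly the cat/mul/add/relu chain the single fused kernel computes.

import torch

x = torch.rand(4, 4, 4, 4).cuda()
m = CReLUNew(4).cuda()   # in_channels=4 -> out_channels=8
y = m(x)
print(y.shape)           # torch.Size([4, 8, 4, 4])
assert torch.allclose(y, torch.relu(torch.cat([x, -x], dim=1)))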
MixPad2d
import torch from torch import nn class MixPad2d(nn.Module): """Mixed padding modes for H and W dimensions Args: padding (tuple): the size of the padding for x and y, ie (pad_x, pad_y) modes (tuple): the padding modes for x and y, the values of each can be ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``['replicate', 'circular']`` """ __constants__ = ['modes', 'padding'] def __init__(self, padding=[1, 1], modes=['replicate', 'circular']): super(MixPad2d, self).__init__() assert len(padding) == 2 self.padding = padding self.modes = modes def forward(self, x): x = nn.functional.pad(x, (0, 0, self.padding[1], self.padding[1]), self.modes[1]) x = nn.functional.pad(x, (self.padding[0], self.padding[0], 0, 0), self.modes[0]) return x def extra_repr(self): repr_ = ( 'Mixed Padding: \t x axis: mode: {}, padding: {},\n\t y axis mode: {}, padding: {}' .format(self.modes[0], self.padding[0], self.modes[1], self. padding[1])) return repr_ def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x2 = xindex // 24 x3 = xindex % 24 x4 = xindex tmp38 = tl.load(in_ptr1 + x4, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp10 & xmask, other=0.0) tmp12 = tl.load(in_ptr1 + x4, tmp6 & xmask, other=0.0) tmp13 = tl.where(tmp9, tmp11, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tmp3 >= tmp4 tmp17 = tmp3 < tmp1 tmp18 = tmp16 & tmp17 tmp19 = tmp18 & tmp2 tmp20 = tl.load(in_ptr0 + (-20 + x3 + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tl.load(in_ptr1 + (-16 + x4), tmp2 & xmask, other=0.0) tmp22 = tl.where(tmp18, tmp20, tmp21) tmp23 = tl.where(tmp5, tmp15, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = 4 + x1 tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x3 + 16 * x2), tmp31 & xmask, other=0.0) tmp33 = tl.load(in_ptr1 + (16 + x4), tmp26 & xmask, other=0.0) tmp34 = tl.where(tmp30, tmp32, tmp33) tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp26, tmp34, tmp35) tmp37 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp9 & xmask, other=0.0) tmp39 = tl.where(tmp9, tmp37, tmp38) tmp40 = tl.where(tmp26, tmp36, tmp39) tmp41 = tl.where(tmp2, tmp25, tmp40) tl.store(out_ptr0 + x4, tmp41, xmask) @triton.jit def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (5 * (5 <= x1) + x1 * (x1 < 5)) + 24 * x2 + (3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * ( 0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(384)](arg0_1, buf0, buf1, 384, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 del buf0 buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_replication_pad2d_1[grid(576)](buf1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) del buf1 return buf2, class MixPad2dNew(nn.Module): """Mixed padding modes for H and W dimensions Args: padding (tuple): the size of the padding for x and y, ie (pad_x, pad_y) modes (tuple): the padding modes for x and y, the 
values of each can be ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``['replicate', 'circular']`` """ __constants__ = ['modes', 'padding'] def __init__(self, padding=[1, 1], modes=['replicate', 'circular']): super(MixPad2dNew, self).__init__() assert len(padding) == 2 self.padding = padding self.modes = modes def extra_repr(self): repr_ = ( 'Mixed Padding: \t x axis: mode: {}, padding: {},\n\t y axis mode: {}, padding: {}' .format(self.modes[0], self.padding[0], self.modes[1], self. padding[1])) return repr_ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hhj1897/face_parsing
MixPad2d
false
15,515
[ "MIT" ]
70
9cd26b6916f562a2ab356b6b22e9ad93e19f2051
https://github.com/hhj1897/face_parsing/tree/9cd26b6916f562a2ab356b6b22e9ad93e19f2051
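A hypothetical shape and behavior check for the MixPad2d record above (CUDA assumed for the Triton path). The two assertions restate what the eager module's two F.pad calls do: rows wrap around (circular along H) and columns clamp to the border (replicate along W).

import torch

x = torch.rand(4, 4, 4, 4).cuda()
pad = MixPad2dNew()      # defaults: replicate along x/W, circular along y/H
y = pad(x)
print(y.shape)           # torch.Size([4, 4, 6, 6])
assert torch.allclose(y[:, :, 0, 1:-1], x[:, :, -1, :])  # top row = wrapped last row
assert torch.allclose(y[:, :, 1:-1, 0], x[:, :, :, 0])   # left col = replicated first col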
PaddedMaxPool2d
import torch import torch.nn as nn import torch.nn.functional as F class PaddedMaxPool2d(nn.Module): """ Maxpool layer with a replicating padding. Args: kernel_size (int or tuple): Kernel size for maxpooling stride (int or tuple, optional): The stride of the window; Default ``kernel_size`` padding (tuple, optional): (left, right, top, bottom) padding; Default **(0, 0, 0, 0)** dilation (int or tuple, optional): A parameter that controls the stride of elements in the window """ def __init__(self, kernel_size, stride=None, padding=(0, 0, 0, 0), dilation=1): super(PaddedMaxPool2d, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.dilation = dilation def __repr__(self): return ( f'{self.__class__.__name__} (kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})' ) def forward(self, x): x = F.max_pool2d(F.pad(x, self.padding, mode='replicate'), self.kernel_size, self.stride, 0, self.dilation) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp20 = triton_helpers.maximum(tmp19, tmp18)
    tmp22 = triton_helpers.maximum(tmp21, tmp20)
    tmp24 = triton_helpers.maximum(tmp23, tmp22)
    tmp26 = triton_helpers.maximum(tmp25, tmp24)
    tmp28 = triton_helpers.maximum(tmp27, tmp26)
    tmp30 = triton_helpers.maximum(tmp29, tmp28)
    tl.store(out_ptr0 + x0, tmp30, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class PaddedMaxPool2dNew(nn.Module):
    """ Maxpool layer with a replicating padding.

    Args:
        kernel_size (int or tuple): Kernel size for maxpooling
        stride (int or tuple, optional): The stride of the window; Default ``kernel_size``
        padding (tuple, optional): (left, right, top, bottom) padding; Default **None**
        dilation (int or tuple, optional): A parameter that controls the stride of elements in the window
    """

    def __init__(self, kernel_size, stride=None, padding=(0, 0, 0, 0),
        dilation=1):
        super(PaddedMaxPool2dNew, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation

    def __repr__(self):
        return (
            f'{self.__class__.__name__} (kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})'
            )

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
hilman-dayo/ObjectDetection-OneStageDet
PaddedMaxPool2d
false
15516
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
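Note: with this record's inputs (a 4x4x4x4 tensor, kernel_size=4, zero extra padding), the fused Triton kernel above reduces each 4x4 spatial plane to its global maximum: the 16 tl.load calls read one plane and the chain of triton_helpers.maximum calls folds it into a single value. A minimal eager-mode sketch of the same computation (illustrative only, not code from the repo):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
ref = F.max_pool2d(x, kernel_size=4)              # shape (4, 4, 1, 1)
# equivalent global max over each 16-element plane, as the kernel computes
alt = x.view(4, 4, -1).amax(dim=-1).view(4, 4, 1, 1)
assert torch.equal(ref, alt)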
ScaleReLU
import torch
import torch.nn as nn


class Scale(nn.Module):

    def __init__(self, nchannels, bias=True, init_scale=1.0):
        super().__init__()
        self.nchannels = nchannels
        self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(init_scale)

    def reset_parameters(self, init_scale=1.0):
        self.weight.data.fill_(init_scale)
        if self.bias is not None:
            self.bias.data.fill_(0.0)

    def forward(self, x):
        y = x * self.weight
        if self.bias is not None:
            y += self.bias
        return y

    def __repr__(self):
        s = '{} ({}, {})'
        return s.format(self.__class__.__name__, self.nchannels,
            self.bias is not None)


class ScaleReLU(nn.Module):

    def __init__(self, nchannels):
        super().__init__()
        self.scale = Scale(nchannels)
        self.relu = nn.ReLU(inplace=True)
        self.nchannels = nchannels

    def forward(self, x):
        x1 = self.scale(x)
        y = self.relu(x1)
        return y

    def __repr__(self):
        s = '{name} ({nchannels})'
        return s.format(name=self.__class__.__name__, **self.__dict__)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nchannels': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_0(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = 0.0
    tmp8 = tmp6 <= tmp7
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_add_mul_relu_threshold_backward_0[grid(256)](
            primals_2, primals_1, primals_3, buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2, buf1


class Scale(nn.Module):

    def __init__(self, nchannels, bias=True, init_scale=1.0):
        super().__init__()
        self.nchannels = nchannels
        self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(init_scale)

    def reset_parameters(self, init_scale=1.0):
        self.weight.data.fill_(init_scale)
        if self.bias is not None:
            self.bias.data.fill_(0.0)

    def forward(self, x):
        y = x * self.weight
        if self.bias is not None:
            y += self.bias
        return y

    def __repr__(self):
        s = '{} ({}, {})'
        return s.format(self.__class__.__name__, self.nchannels,
            self.bias is not None)


class ScaleReLUNew(nn.Module):

    def __init__(self, nchannels):
        super().__init__()
        self.scale = Scale(nchannels)
        self.relu = nn.ReLU(inplace=True)
        self.nchannels = nchannels

    def __repr__(self):
        s = '{name} ({nchannels})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, input_0):
        primals_1 = self.scale.weight
        primals_3 = self.scale.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
hilman-dayo/ObjectDetection-OneStageDet
ScaleReLU
false
15517
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
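Note: the fused kernel above computes relu(x * w + b) elementwise and also writes the boolean mask (output <= 0) that the autograd threshold_backward pass later consumes. An eager-mode sketch of what the two output buffers hold, with the Scale parameters at their init values (illustrative only, not from the repo):

import torch

x = torch.rand(4, 4, 4, 4)
w = torch.ones(1, 4, 1, 1)     # Scale weight at init_scale=1.0
b = torch.zeros(1, 4, 1, 1)    # Scale bias at 0.0
y = torch.relu(x * w + b)      # what buf0 holds
mask = y <= 0.0                # what buf1 holds, reused by the backward pass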
DiceLoss
import torch
import torch.nn as nn


def binaray_dice_loss(predict, target, smooth=1, p=2, weight=None):
    """Dice loss for binary classification

    Args:
        predict(Tensor): a tensor of shape [N, H, W]
        target(Tensor): a tensor of shape same with predict
        smooth(float): a float number to smooth loss, and avoid NaN error, default:1
        p(int): Denominator value, \\sum{x^p}+\\sum{y^p}, default:2
        weight (Tensor): pixel-wise loss weight, the shape is [H, W]

    Returns:
        Tensor: loss tensor
    """
    assert predict.shape[0] == target.shape[0]
    if weight is not None:
        predict = torch.mul(predict, weight)
        target = torch.mul(target, weight)
    predict = predict.contiguous().view(predict.shape[0], -1)
    target = target.contiguous().view(target.shape[0], -1)
    num = torch.sum(torch.mul(predict, target)) * 2 + smooth
    den = torch.sum(predict.pow(p) + target.pow(p)) + smooth
    loss = 1 - num / den
    return loss


class DiceLoss(nn.Module):
    """Dice loss for multi-class classification. [1]
    Ref: https://github.com/hubutui/DiceLoss-PyTorch
    """

    def __init__(self, smooth=1, p=2, loss_weight=1.0):
        """ Initialization.

        Args:
            smooth(float): a float number to smooth loss, and avoid NaN error, default:1
            p(int): Denominator value, \\sum{x^p}+\\sum{y^p}, default:2
            loss_weight(float): loss weight
        """
        super().__init__()
        self.smooth = smooth
        self.p = p
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, weight_in_channel=None):
        """ Multiply loss with loss_weight.

        Args:
            pred(Tensor): a tensor of shape [N, C, H, W]
            target(Tensor): a tensor of shape same with pred
            weight(Tensor): pixel-wise weight tensor, whose shape is [N, H, W]
            weight_in_channel(Tensor): channel-wise weight tensor, whose shape is [N, C]

        Returns:
            Tensor: loss tensor
        """
        loss = self.loss_weight * self._multi_cls_loss(pred, target,
            weight=weight, weight_in_channel=weight_in_channel)
        return loss

    def _multi_cls_loss(self, predict, target, weight=None,
        weight_in_channel=None):
        """Dice loss for multi-class classification (as the expected value of
        multiple dice losses for binary classifications, taken separately)

        Args:
            predict(Tensor): feature map predictions, [N, num_classes, H, W],
                where for num_classes classes, each contains a map of shape [H, W]
            target(Tensor): feature map ground-truth labels (one-hot encoding),
                [N, num_classes, H, W], where for num_classes classes, each contains a map of shape [H, W]
            weight(Tensor): [N, H, W], mask (or weight) of feature map ground-truth labels;
                no loss is generated in a pixel if the corresponding element of weight is 0
            weight_in_channel(Tensor): [N, num_classes], weight for channels

        Returns:
            loss tensor
        """
        assert predict.shape == target.shape
        if weight is not None:
            assert predict[0, 0].shape == weight[0].shape
        if weight_in_channel is not None:
            predict = torch.mul(predict, weight_in_channel)
            target = torch.mul(target, weight_in_channel)
        total_loss = 0
        for i in range(target.shape[1]):
            dice_loss = binaray_dice_loss(predict[:, i], target[:, i],
                self.smooth, self.p, weight=weight)
            total_loss += dice_loss
        return total_loss / target.shape[1]


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp12 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp13 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp24 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp25 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp36 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp37 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tmp0 * tmp0
    tmp7 = tmp1 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = tmp12 * tmp12
    tmp19 = tmp13 * tmp13
    tmp20 = tmp18 + tmp19
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
    tmp23 = tl.sum(tmp21, 1)[:, None]
    tmp26 = tmp24 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.sum(tmp27, 1)[:, None]
    tmp30 = tmp24 * tmp24
    tmp31 = tmp25 * tmp25
    tmp32 = tmp30 + tmp31
    tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
    tmp35 = tl.sum(tmp33, 1)[:, None]
    tmp38 = tmp36 * tmp37
    tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
    tmp41 = tl.sum(tmp39, 1)[:, None]
    tmp42 = tmp36 * tmp36
    tmp43 = tmp37 * tmp37
    tmp44 = tmp42 + tmp43
    tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
    tmp47 = tl.sum(tmp45, 1)[:, None]
    tmp48 = 2.0
    tmp49 = tmp5 * tmp48
    tmp50 = 1.0
    tmp51 = tmp49 + tmp50
    tmp52 = tmp11 + tmp50
    tmp53 = tmp51 / tmp52
    tmp54 = tmp50 - tmp53
    tmp55 = 0.0
    tmp56 = tmp54 + tmp55
    tmp57 = tmp17 * tmp48
    tmp58 = tmp57 + tmp50
    tmp59 = tmp23 + tmp50
    tmp60 = tmp58 / tmp59
    tmp61 = tmp50 - tmp60
    tmp62 = tmp56 + tmp61
    tmp63 = tmp29 * tmp48
    tmp64 = tmp63 + tmp50
    tmp65 = tmp35 + tmp50
    tmp66 = tmp64 / tmp65
    tmp67 = tmp50 - tmp66
    tmp68 = tmp62 + tmp67
    tmp69 = tmp41 * tmp48
    tmp70 = tmp69 + tmp50
    tmp71 = tmp47 + tmp50
    tmp72 = tmp70 / tmp71
    tmp73 = tmp50 - tmp72
    tmp74 = tmp68 + tmp73
    tmp75 = 0.25
    tmp76 = tmp74 * tmp75
    tmp77 = tmp76 * tmp50
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp77, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf0
        del buf0
        buf9 = buf8
        del buf8
        get_raw_stream(0)
        triton_per_fused_add_div_mul_pow_rsub_sum_0[grid(1)](buf9, arg0_1,
            arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf9,


def binaray_dice_loss(predict, target, smooth=1, p=2, weight=None):
    """Dice loss for binary classification

    Args:
        predict(Tensor): a tensor of shape [N, H, W]
        target(Tensor): a tensor of shape same with predict
        smooth(float): a float number to smooth loss, and avoid NaN error, default:1
        p(int): Denominator value, \\sum{x^p}+\\sum{y^p}, default:2
        weight (Tensor): pixel-wise loss weight, the shape is [H, W]

    Returns:
        Tensor: loss tensor
    """
    assert predict.shape[0] == target.shape[0]
    if weight is not None:
        predict = torch.mul(predict, weight)
        target = torch.mul(target, weight)
    predict = predict.contiguous().view(predict.shape[0], -1)
    target = target.contiguous().view(target.shape[0], -1)
    num = torch.sum(torch.mul(predict, target)) * 2 + smooth
    den = torch.sum(predict.pow(p) + target.pow(p)) + smooth
    loss = 1 - num / den
    return loss


class DiceLossNew(nn.Module):
    """Dice loss for multi-class classification. [1]
    Ref: https://github.com/hubutui/DiceLoss-PyTorch
    """

    def __init__(self, smooth=1, p=2, loss_weight=1.0):
        """ Initialization.

        Args:
            smooth(float): a float number to smooth loss, and avoid NaN error, default:1
            p(int): Denominator value, \\sum{x^p}+\\sum{y^p}, default:2
            loss_weight(float): loss weight
        """
        super().__init__()
        self.smooth = smooth
        self.p = p
        self.loss_weight = loss_weight

    def _multi_cls_loss(self, predict, target, weight=None,
        weight_in_channel=None):
        """Dice loss for multi-class classification (as the expected value of
        multiple dice losses for binary classifications, taken separately)

        Args:
            predict(Tensor): feature map predictions, [N, num_classes, H, W],
                where for num_classes classes, each contains a map of shape [H, W]
            target(Tensor): feature map ground-truth labels (one-hot encoding),
                [N, num_classes, H, W], where for num_classes classes, each contains a map of shape [H, W]
            weight(Tensor): [N, H, W], mask (or weight) of feature map ground-truth labels;
                no loss is generated in a pixel if the corresponding element of weight is 0
            weight_in_channel(Tensor): [N, num_classes], weight for channels

        Returns:
            loss tensor
        """
        assert predict.shape == target.shape
        if weight is not None:
            assert predict[0, 0].shape == weight[0].shape
        if weight_in_channel is not None:
            predict = torch.mul(predict, weight_in_channel)
            target = torch.mul(target, weight_in_channel)
        total_loss = 0
        for i in range(target.shape[1]):
            dice_loss = binaray_dice_loss(predict[:, i], target[:, i],
                self.smooth, self.p, weight=weight)
            total_loss += dice_loss
        return total_loss / target.shape[1]

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
hikopensource/DAVAR-Lab-OCR
DiceLoss
false
15518
[ "Apache-2.0" ]
387
c65285f6668864cca7a12770ae4c8d083ea1cf1b
https://github.com/hikopensource/DAVAR-Lab-OCR/tree/c65285f6668864cca7a12770ae4c8d083ea1cf1b
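Note: a worked numeric example of binaray_dice_loss with smooth=1 and p=2, on hypothetical tiny tensors (not from the repo), to make the num/den arithmetic concrete:

import torch

predict = torch.tensor([[0.8, 0.2]])
target = torch.tensor([[1.0, 0.0]])
num = torch.sum(predict * target) * 2 + 1        # 2 * 0.8 + 1 = 2.6
den = torch.sum(predict ** 2 + target ** 2) + 1  # 0.64 + 0.04 + 1.0 + 1 = 2.68
loss = 1 - num / den                             # ~= 0.0299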
TransformerBlock
import math
import torch
import torch.nn.functional as F


def gelu(x):
    """ GELU activation function. """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class MultiHeadedAttention(torch.nn.Module):
    """ Implementation of multi-head attention. """

    def __init__(self, n_heads, hidden_size, drop_rate):
        super().__init__()
        assert hidden_size % n_heads == 0
        self.n_dk = hidden_size // n_heads
        self.n_heads = n_heads
        self.proj_query = torch.nn.Linear(hidden_size, hidden_size)
        self.proj_key = torch.nn.Linear(hidden_size, hidden_size)
        self.proj_value = torch.nn.Linear(hidden_size, hidden_size)
        self.dropout = torch.nn.Dropout(drop_rate)
        self.proj_output = torch.nn.Linear(hidden_size, hidden_size)

    def forward(self, input_, mask=None):
        """ Input: embedding. """
        batch_size = input_.size(0)
        query = self.proj_query(input_)
        query = query.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2)
        key = self.proj_key(input_)
        key = key.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2)
        value = self.proj_value(input_)
        value = value.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2)
        scores = query @ key.transpose(-2, -1)
        scores = scores / math.sqrt(self.n_dk)
        if mask is not None:
            mask = mask[:, None, None, :]
            scores = scores.masked_fill(mask == 0, -1000000000.0)
        attn = F.softmax(scores, dim=-1)
        attn = self.dropout(attn)
        cv = attn @ value
        cv = cv.transpose(1, 2)
        cv = cv.contiguous().view(batch_size, -1, self.n_heads * self.n_dk)
        return self.proj_output(cv)


class LayerNormalization(torch.nn.Module):
    """ Epsilon outside the square root. """

    def __init__(self, size, eps=1e-06):
        super(LayerNormalization, self).__init__()
        self.gamma = torch.nn.Parameter(torch.ones(size))
        self.beta = torch.nn.Parameter(torch.zeros(size))
        self.eps = eps
        self.register_parameter('gamma', self.gamma)
        self.register_parameter('beta', self.beta)

    def forward(self, input_):
        mean = torch.mean(input_, -1, keepdim=True)
        std = torch.std(input_, -1, keepdim=True)
        return self.gamma * (input_ - mean) / (std + self.eps) + self.beta


class PositionwiseFeedForward(torch.nn.Module):
    """ FeedForward Neural Networks for each position """

    def __init__(self, input_size, hidden_size, output_size, drop_rate):
        super(PositionwiseFeedForward, self).__init__()
        self.ff1 = torch.nn.Linear(input_size, hidden_size)
        self.ff2 = torch.nn.Linear(hidden_size, output_size)
        self.drop = torch.nn.Dropout(drop_rate)

    def forward(self, input_):
        """ (B, S, D) -> (B, S, D_ff) -> (B, S, D) """
        return self.drop(self.ff2(gelu(self.ff1(input_))))


class TransformerBlock(torch.nn.Module):
    """ Implementation of Transformer """

    def __init__(self, input_size, n_heads, drop_rate,
        device=torch.device('cpu')):
        super().__init__()
        self.attentionMH = MultiHeadedAttention(n_heads, input_size, drop_rate)
        self.norm1 = LayerNormalization(input_size)
        self.norm2 = LayerNormalization(input_size)
        self.layer_ff = PositionwiseFeedForward(input_size, input_size * 4,
            input_size, drop_rate)
        self.drop = torch.nn.Dropout(drop_rate)

    def forward(self, input_, mask=None):
        """ Transformer """
        hd = self.attentionMH(input_, mask)
        hd = self.norm1(input_ + self.drop(hd))
        hd = self.norm2(hd + self.layer_ff(hd))
        return self.drop(hd)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'n_heads': 4, 'drop_rate': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp6 = tmp5 == 0
    tmp7 = tmp0 - tmp0
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tmp8 / tmp8
    tmp10 = 0.0
    tmp11 = tl.where(tmp6, tmp10, tmp9)
    tl.store(in_out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused_add_mean_std_2(in_out_ptr0, in_ptr0, in_ptr1,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = 3.0
    tmp29 = tmp27 / tmp28
    tl.store(in_out_ptr0 + x2, tmp29, xmask)
    tl.store(out_ptr0 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_3(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x3 = xindex % 16
    x2 = xindex // 16
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 - tmp4
    tmp6 = tmp0 * tmp5
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = 1e-06
    tmp10 = tmp8 + tmp9
    tmp11 = tmp6 / tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x5, tmp13, xmask)


@triton.jit
def triton_poi_fused_add_div_erf_mul_4(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865475
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_6(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (16, 4), (4, 1))
    assert_size_stride(primals_13, (16,), (1,))
    assert_size_stride(primals_14, (4, 16), (16, 1))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_4, (4, 4),
            (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
            primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        buf3 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf1
        triton_poi_fused_0[grid(16)](buf4, primals_5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 1, 1), (1, 0, 0),
            0), reinterpret_tensor(buf4, (16, 1, 1), (1, 0, 0), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf5
        triton_poi_fused_1[grid(16)](buf6, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf7 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 1), (1, 1, 1),
            0), reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (4, 4),
            (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf8)
        del primals_9
        buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf10 = buf9
        del buf9
        buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_mean_std_2[grid(16)](buf10, primals_1, buf8,
            buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_std_sub_3[grid(64)](primals_10,
            primals_1, buf8, buf11, buf10, primals_11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf10
        del buf11
        del primals_11
        buf13 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_12, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf13)
        del primals_13
        buf14 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_add_div_erf_mul_4[grid(256)](buf13, buf14, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf14, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_14, (16, 4), (1, 16), 0), out=buf15)
        buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
        del buf15
        triton_poi_fused_add_5[grid(64)](buf16, buf12, primals_15, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_15
        buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_std_sub_6[grid(64)](primals_16,
            buf16, primals_17, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_17
    return (buf17, primals_1, primals_10, primals_16, buf6,
        reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 4), 0),
        reinterpret_tensor(buf3, (16, 1, 1), (1, 1, 4), 0),
        reinterpret_tensor(buf4, (16, 1, 1), (1, 4, 1), 0),
        reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf8,
        reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13,
        reinterpret_tensor(buf14, (16, 16), (16, 1), 0), buf16,
        primals_14, primals_12, primals_8)


def gelu(x):
    """ GELU activation function. """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class MultiHeadedAttention(torch.nn.Module):
    """ Implementation of multi-head attention. """

    def __init__(self, n_heads, hidden_size, drop_rate):
        super().__init__()
        assert hidden_size % n_heads == 0
        self.n_dk = hidden_size // n_heads
        self.n_heads = n_heads
        self.proj_query = torch.nn.Linear(hidden_size, hidden_size)
        self.proj_key = torch.nn.Linear(hidden_size, hidden_size)
        self.proj_value = torch.nn.Linear(hidden_size, hidden_size)
        self.dropout = torch.nn.Dropout(drop_rate)
        self.proj_output = torch.nn.Linear(hidden_size, hidden_size)

    def forward(self, input_, mask=None):
        """ Input: embedding. """
        batch_size = input_.size(0)
        query = self.proj_query(input_)
        query = query.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2)
        key = self.proj_key(input_)
        key = key.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2)
        value = self.proj_value(input_)
        value = value.view(batch_size, -1, self.n_heads, self.n_dk).transpose(1, 2)
        scores = query @ key.transpose(-2, -1)
        scores = scores / math.sqrt(self.n_dk)
        if mask is not None:
            mask = mask[:, None, None, :]
            scores = scores.masked_fill(mask == 0, -1000000000.0)
        attn = F.softmax(scores, dim=-1)
        attn = self.dropout(attn)
        cv = attn @ value
        cv = cv.transpose(1, 2)
        cv = cv.contiguous().view(batch_size, -1, self.n_heads * self.n_dk)
        return self.proj_output(cv)


class LayerNormalization(torch.nn.Module):
    """ Epsilon outside the square root. """

    def __init__(self, size, eps=1e-06):
        super(LayerNormalization, self).__init__()
        self.gamma = torch.nn.Parameter(torch.ones(size))
        self.beta = torch.nn.Parameter(torch.zeros(size))
        self.eps = eps
        self.register_parameter('gamma', self.gamma)
        self.register_parameter('beta', self.beta)

    def forward(self, input_):
        mean = torch.mean(input_, -1, keepdim=True)
        std = torch.std(input_, -1, keepdim=True)
        return self.gamma * (input_ - mean) / (std + self.eps) + self.beta


class PositionwiseFeedForward(torch.nn.Module):
    """ FeedForward Neural Networks for each position """

    def __init__(self, input_size, hidden_size, output_size, drop_rate):
        super(PositionwiseFeedForward, self).__init__()
        self.ff1 = torch.nn.Linear(input_size, hidden_size)
        self.ff2 = torch.nn.Linear(hidden_size, output_size)
        self.drop = torch.nn.Dropout(drop_rate)

    def forward(self, input_):
        """ (B, S, D) -> (B, S, D_ff) -> (B, S, D) """
        return self.drop(self.ff2(gelu(self.ff1(input_))))


class TransformerBlockNew(torch.nn.Module):
    """ Implementation of Transformer """

    def __init__(self, input_size, n_heads, drop_rate,
        device=torch.device('cpu')):
        super().__init__()
        self.attentionMH = MultiHeadedAttention(n_heads, input_size, drop_rate)
        self.norm1 = LayerNormalization(input_size)
        self.norm2 = LayerNormalization(input_size)
        self.layer_ff = PositionwiseFeedForward(input_size, input_size * 4,
            input_size, drop_rate)
        self.drop = torch.nn.Dropout(drop_rate)

    def forward(self, input_0):
        primals_1 = self.attentionMH.proj_query.weight
        primals_3 = self.attentionMH.proj_query.bias
        primals_2 = self.attentionMH.proj_key.weight
        primals_5 = self.attentionMH.proj_key.bias
        primals_4 = self.attentionMH.proj_value.weight
        primals_7 = self.attentionMH.proj_value.bias
        primals_6 = self.attentionMH.proj_output.weight
        primals_9 = self.attentionMH.proj_output.bias
        primals_10 = self.norm1.gamma
        primals_11 = self.norm1.beta
        primals_15 = self.norm2.gamma
        primals_16 = self.norm2.beta
        primals_12 = self.layer_ff.ff1.weight
        primals_13 = self.layer_ff.ff1.bias
        primals_14 = self.layer_ff.ff2.weight
        primals_17 = self.layer_ff.ff2.bias
        primals_8 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        return output[0]
haophancs/TREQS
TransformerBlock
false
15519
[ "MIT" ]
149
49e354ce2a08cf963ec139d99936020e0f80ced8
https://github.com/haophancs/TREQS/tree/49e354ce2a08cf963ec139d99936020e0f80ced8
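Note: the triton_poi_fused_add_div_erf_mul_4 kernel above implements the exact erf-based GELU from this record's gelu(); 0.7071067811865475 is 1/sqrt(2). A small check against PyTorch's built-in (illustrative, not from the repo; F.gelu defaults to the same erf form):

import math
import torch
import torch.nn.functional as F

x = torch.randn(8)
g = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))  # the record's gelu()
assert torch.allclose(g, F.gelu(x))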
L2Norm
import torch
import torch.nn as nn


class Scale(nn.Module):

    def __init__(self, nchannels, bias=True, init_scale=1.0):
        super().__init__()
        self.nchannels = nchannels
        self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(init_scale)

    def reset_parameters(self, init_scale=1.0):
        self.weight.data.fill_(init_scale)
        if self.bias is not None:
            self.bias.data.fill_(0.0)

    def forward(self, x):
        y = x * self.weight
        if self.bias is not None:
            y += self.bias
        return y

    def __repr__(self):
        s = '{} ({}, {})'
        return s.format(self.__class__.__name__, self.nchannels,
            self.bias is not None)


class L2Norm(nn.Module):

    def __init__(self, nchannels, bias=True):
        super().__init__()
        self.scale = Scale(nchannels, bias=bias)
        self.nchannels = nchannels
        self.eps = 1e-06

    def forward(self, x):
        l2_norm = x.norm(2, dim=1, keepdim=True) + self.eps
        x_norm = x.div(l2_norm)
        y = self.scale(x_norm)
        return y

    def __repr__(self):
        s = '{name} ({nchannels})'
        return s.format(name=self.__class__.__name__, **self.__dict__)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nchannels': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tmp17 = tmp15 * tmp16
    tmp19 = tmp17 + tmp18
    tl.store(out_ptr0 + x3, tmp19, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_linalg_vector_norm_mul_0[grid(256)](
            primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class Scale(nn.Module):

    def __init__(self, nchannels, bias=True, init_scale=1.0):
        super().__init__()
        self.nchannels = nchannels
        self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(init_scale)

    def reset_parameters(self, init_scale=1.0):
        self.weight.data.fill_(init_scale)
        if self.bias is not None:
            self.bias.data.fill_(0.0)

    def forward(self, x):
        y = x * self.weight
        if self.bias is not None:
            y += self.bias
        return y

    def __repr__(self):
        s = '{} ({}, {})'
        return s.format(self.__class__.__name__, self.nchannels,
            self.bias is not None)


class L2NormNew(nn.Module):

    def __init__(self, nchannels, bias=True):
        super().__init__()
        self.scale = Scale(nchannels, bias=bias)
        self.nchannels = nchannels
        self.eps = 1e-06

    def __repr__(self):
        s = '{name} ({nchannels})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, input_0):
        primals_2 = self.scale.weight
        primals_3 = self.scale.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
hilman-dayo/ObjectDetection-OneStageDet
L2Norm
false
15520
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
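Note: the fused kernel above folds the channel-wise L2 norm (the four squared loads summed per spatial position), the eps-stabilized division, and the Scale affine into one pass. An eager-mode sketch with the parameters at their init values (illustrative only, not from the repo):

import torch

x = torch.rand(4, 4, 4, 4)
w = torch.ones(1, 4, 1, 1)                    # Scale weight at init_scale=1.0
b = torch.zeros(1, 4, 1, 1)                   # Scale bias at 0.0
l2 = x.norm(2, dim=1, keepdim=True) + 1e-06   # eps added after the sqrt
y = x / l2 * w + b                            # what the kernel writes to buf0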
MultiHeadedAttention
import torch
import numpy as np
import torch.utils.data


class ScaledDotProductAttention(torch.nn.Module):
    """ Scaled, softmax attention module for Transformer as defined by
    Attention(Q, K, V) on pg 4. Returns the final attention vectors as well as
    the attention matrices (pairwise scores). """

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, Q, K, V, mask=None, dropout=None):
        scores = torch.matmul(Q, K.transpose(-2, -1))
        scores = scores / np.sqrt(K.shape[-1])
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -np.inf)
        scores = self.softmax(scores)
        if dropout is not None:
            scores = dropout(scores)
        return torch.matmul(scores, V), scores


class MultiHeadedAttention(torch.nn.Module):
    """ Multi-headed attention layer for the Transformer model. Wraps
    ScaledDotProductAttention. Assumes n_heads are applied by splitting up the
    model into n_heads, each of size dm / n_heads.

    Guided by http://nlp.seas.harvard.edu/2018/04/03/attention.html """

    def __init__(self, dm, n_heads, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert dm % n_heads == 0, 'The dimension of the model must be evenly divisible by the number of attn heads.'
        self.dm = dm
        self.dk = dm // n_heads
        self.n_heads = n_heads
        self.wq = torch.nn.Linear(self.dm, self.dm)
        self.wk = torch.nn.Linear(self.dm, self.dm)
        self.wv = torch.nn.Linear(self.dm, self.dm)
        self.wo = torch.nn.Linear(self.dm, self.dm)
        self.attn_scores = None
        self.attn = ScaledDotProductAttention()
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, preQ, preK, preV, mask=None):
        n_batch = preQ.shape[0]
        Q, K, V = self.wq(preQ), self.wk(preK), self.wv(preV)
        Q, K, V = (x.view(n_batch, -1, self.n_heads, self.dk).transpose(1, 2)
            for x in (Q, K, V))
        mask = mask.unsqueeze(1) if mask is not None else None
        attn_output, self.attn_scores = self.attn(Q, K, V, mask, self.dropout)
        attn_output = attn_output.transpose(1, 2).contiguous().view(n_batch,
            -1, self.dm)
        return self.wo(attn_output)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dm': 4, 'n_heads': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)


@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 16
    y1 = yindex // 16
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
        del primals_7
        buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
        del buf0
        triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
        buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1),
            torch.float32)
        triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del buf5
        buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
        del buf1
        triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_8
        buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
            1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
            out=buf10)
        buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
        del buf10
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf12)
        del primals_11
    return (reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0), buf8,
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
        reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8,
        reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_10,
        reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0),
        reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0))


class ScaledDotProductAttention(torch.nn.Module):
    """ Scaled, softmax attention module for Transformer as defined by
    Attention(Q, K, V) on pg 4. Returns the final attention vectors as well as
    the attention matrices (pairwise scores). """

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, Q, K, V, mask=None, dropout=None):
        scores = torch.matmul(Q, K.transpose(-2, -1))
        scores = scores / np.sqrt(K.shape[-1])
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -np.inf)
        scores = self.softmax(scores)
        if dropout is not None:
            scores = dropout(scores)
        return torch.matmul(scores, V), scores


class MultiHeadedAttentionNew(torch.nn.Module):
    """ Multi-headed attention layer for the Transformer model. Wraps
    ScaledDotProductAttention. Assumes n_heads are applied by splitting up the
    model into n_heads, each of size dm / n_heads.

    Guided by http://nlp.seas.harvard.edu/2018/04/03/attention.html """

    def __init__(self, dm, n_heads, dropout=0.1):
        super(MultiHeadedAttentionNew, self).__init__()
        assert dm % n_heads == 0, 'The dimension of the model must be evenly divisible by the number of attn heads.'
        self.dm = dm
        self.dk = dm // n_heads
        self.n_heads = n_heads
        self.wq = torch.nn.Linear(self.dm, self.dm)
        self.wk = torch.nn.Linear(self.dm, self.dm)
        self.wv = torch.nn.Linear(self.dm, self.dm)
        self.wo = torch.nn.Linear(self.dm, self.dm)
        self.attn_scores = None
        self.attn = ScaledDotProductAttention()
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.wq.weight
        primals_3 = self.wq.bias
        primals_4 = self.wk.weight
        primals_5 = self.wk.bias
        primals_7 = self.wv.weight
        primals_8 = self.wv.bias
        primals_10 = self.wo.weight
        primals_11 = self.wo.bias
        primals_1 = input_0
        primals_6 = input_1
        primals_9 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
hengwei-chan/protein_transformer
MultiHeadedAttention
false
15521
[ "BSD-3-Clause" ]
77
988bb0fcbb94b37e5a02071bd345ea073ad605f8
https://github.com/hengwei-chan/protein_transformer/tree/988bb0fcbb94b37e5a02071bd345ea073ad605f8
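Note: a minimal eager-mode sketch of the scaled dot-product attention that ScaledDotProductAttention implements, on hypothetical shapes (batch 2, sequence 3, dk 8; not from the repo):

import numpy as np
import torch

Q = torch.rand(2, 3, 8)
K = torch.rand(2, 3, 8)
V = torch.rand(2, 3, 8)
scores = torch.softmax(Q @ K.transpose(-2, -1) / np.sqrt(K.shape[-1]), dim=-1)
out = scores @ V
assert torch.allclose(scores.sum(-1), torch.ones(2, 3))  # each row is a distribution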
DiceLoss
import torch
import torch.nn as nn


class DiceLoss(nn.Module):

    def __init__(self, ignore_target=-1):
        super().__init__()
        self.ignore_target = ignore_target

    def forward(self, input, target):
        """
        :param input: (N), logit
        :param target: (N), {0, 1}
        :return:
        """
        input = torch.sigmoid(input.view(-1))
        target = target.float().view(-1)
        mask = (target != self.ignore_target).float()
        return 1.0 - (torch.min(input, target) * mask).sum() / torch.clamp(
            (torch.max(input, target) * mask).sum(), min=1.0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = triton_helpers.minimum(tmp1, tmp2)
    tmp4 = -1.0
    tmp5 = tmp2 != tmp4
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp3 * tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = triton_helpers.maximum(tmp1, tmp2)
    tmp12 = tmp11 * tmp6
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp16 = 1.0
    tmp17 = triton_helpers.maximum(tmp15, tmp16)
    tmp18 = tmp10 / tmp17
    tmp19 = tmp16 - tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0[
            grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class DiceLossNew(nn.Module):

    def __init__(self, ignore_target=-1):
        super().__init__()
        self.ignore_target = ignore_target

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
hlesmqh/WS3D
DiceLoss
false
15522
[ "MIT" ]
100
6816eeb135923a59de34ee5d94be2d0fd3ec83f9
https://github.com/hlesmqh/WS3D/tree/6816eeb135923a59de34ee5d94be2d0fd3ec83f9
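Note: this variant is the soft-IoU (Tanimoto) form of the dice loss: for {0, 1} targets, min(sigmoid(x), t) acts as the intersection and max(sigmoid(x), t) as the union. A worked example on hypothetical logits (not from the repo; all targets differ from ignore_target=-1, so the mask is all ones):

import torch

pred = torch.tensor([2.0, -2.0])                            # logits
target = torch.tensor([1.0, 0.0])
p = torch.sigmoid(pred)                                      # ~[0.881, 0.119]
inter = torch.min(p, target).sum()                           # ~0.881
union = torch.clamp(torch.max(p, target).sum(), min=1.0)     # ~1.119
loss = 1.0 - inter / union                                   # ~0.213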
LossPredictionLoss
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from math import sqrt as sqrt
from itertools import product as product


class LossPredictionLoss(nn.Module):

    def __init__(self, margin=1.0):
        super(LossPredictionLoss, self).__init__()
        self.margin = margin

    def forward(self, input, target):
        input = (input - input.flip(0))[:len(input) // 2]
        target = (target - target.flip(0))[:len(target) // 2]
        target = target.detach()
        one = 2 * torch.sign(torch.clamp(target, min=0)) - 1
        loss = torch.sum(torch.clamp(self.margin - one * input, min=0))
        loss = loss / input.size(0)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    r0 = rindex % 64
    r1 = rindex // 64
    tmp0 = tl.load(in_ptr0 + r2, None)
    tmp1 = tl.load(in_ptr0 + (192 + r0 + -64 * r1), None)
    tmp16 = tl.load(in_ptr1 + r2, None)
    tmp17 = tl.load(in_ptr1 + (192 + r0 + -64 * r1), None)
    tmp2 = tmp0 - tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = tmp5 < tmp4
    tmp7 = tmp6.to(tl.int8)
    tmp8 = tmp4 < tmp5
    tmp9 = tmp8.to(tl.int8)
    tmp10 = tmp7 - tmp9
    tmp11 = tmp10.to(tmp4.dtype)
    tmp12 = 2.0
    tmp13 = tmp11 * tmp12
    tmp14 = 1.0
    tmp15 = tmp13 - tmp14
    tmp18 = tmp16 - tmp17
    tmp19 = tmp15 * tmp18
    tmp20 = tmp14 - tmp19
    tmp21 = triton_helpers.maximum(tmp20, tmp3)
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
    tmp24 = tl.sum(tmp22, 1)[:, None]
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0[grid(1)](buf1,
            arg1_1, arg0_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class LossPredictionLossNew(nn.Module):

    def __init__(self, margin=1.0):
        super(LossPredictionLossNew, self).__init__()
        self.margin = margin

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
hilman-dayo/active_learning
LossPredictionLoss
false
15523
[ "Apache-2.0" ]
54
cc5b0388be25946e794d59d95e4d9c8c56e24207
https://github.com/hilman-dayo/active_learning/tree/cc5b0388be25946e794d59d95e4d9c8c56e24207
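Note: the flip(0) trick pairs sample i with sample N-1-i, turning the batch into N/2 pairs; the loss is then a margin ranking loss asking the predicted loss differences to agree in sign with the true ones. A small 1-D sketch with hypothetical values (not from the repo):

import torch

input = torch.tensor([0.9, 0.1, 0.4, 0.7])    # predicted losses
target = torch.tensor([1.2, 0.3, 0.2, 0.5])   # true losses (detached in practice)
d_in = (input - input.flip(0))[:2]            # pairs (0,3) and (1,2)
d_tg = (target - target.flip(0))[:2]
one = 2 * torch.sign(torch.clamp(d_tg, min=0)) - 1  # +1 if true diff > 0, else -1
loss = torch.clamp(1.0 - one * d_in, min=0).sum() / 2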
PPReLU
import torch
import torch.nn as nn


class Scale(nn.Module):

    def __init__(self, nchannels, bias=True, init_scale=1.0):
        super().__init__()
        self.nchannels = nchannels
        self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(init_scale)

    def reset_parameters(self, init_scale=1.0):
        self.weight.data.fill_(init_scale)
        if self.bias is not None:
            self.bias.data.fill_(0.0)

    def forward(self, x):
        y = x * self.weight
        if self.bias is not None:
            y += self.bias
        return y

    def __repr__(self):
        s = '{} ({}, {})'
        return s.format(self.__class__.__name__, self.nchannels,
            self.bias is not None)


class PPReLU(nn.Module):

    def __init__(self, nchannels):
        super().__init__()
        self.scale1 = Scale(nchannels, bias=False, init_scale=1.0)
        self.scale2 = Scale(nchannels, bias=False, init_scale=0.1)
        self.nchannels = nchannels

    def forward(self, x):
        x1 = self.scale1(x)
        x2 = self.scale2(x)
        y = torch.max(x1, x2)
        return y

    def __repr__(self):
        s = '{name} ({nchannels})'
        return s.format(name=self.__class__.__name__, **self.__dict__)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nchannels': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_maximum_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp0 * tmp3
    tmp5 = triton_helpers.maximum(tmp2, tmp4)
    tl.store(out_ptr0 + x3, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_maximum_mul_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
    return buf0, primals_1, primals_2, primals_3


class Scale(nn.Module):

    def __init__(self, nchannels, bias=True, init_scale=1.0):
        super().__init__()
        self.nchannels = nchannels
        self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(init_scale)

    def reset_parameters(self, init_scale=1.0):
        self.weight.data.fill_(init_scale)
        if self.bias is not None:
            self.bias.data.fill_(0.0)

    def forward(self, x):
        y = x * self.weight
        if self.bias is not None:
            y += self.bias
        return y

    def __repr__(self):
        s = '{} ({}, {})'
        return s.format(self.__class__.__name__, self.nchannels,
            self.bias is not None)


class PPReLUNew(nn.Module):

    def __init__(self, nchannels):
        super().__init__()
        self.scale1 = Scale(nchannels, bias=False, init_scale=1.0)
        self.scale2 = Scale(nchannels, bias=False, init_scale=0.1)
        self.nchannels = nchannels

    def __repr__(self):
        s = '{name} ({nchannels})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, input_0):
        primals_1 = self.scale1.weight
        primals_3 = self.scale2.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
hilman-dayo/ObjectDetection-OneStageDet
PPReLU
false
15,524
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
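A quick way to see what the fused triton_poi_fused_maximum_mul_0 kernel above computes: PPReLU takes the element-wise maximum of two per-channel scaled copies of the input, max(x * w1, x * w2), so with the default init scales (1.0 and 0.1) it starts out as a LeakyReLU whose two slopes are learnable per channel. A minimal eager-mode sanity check (a sketch; no Triton or CUDA assumed):

import torch

x = torch.randn(4, 4, 4, 4)
w1 = torch.full((1, 4, 1, 1), 1.0)   # scale1.weight after reset_parameters(1.0)
w2 = torch.full((1, 4, 1, 1), 0.1)   # scale2.weight after reset_parameters(0.1)
y = torch.max(x * w1, x * w2)        # what the fused kernel evaluates per element
# At initialization this is exactly LeakyReLU with negative slope 0.1.
assert torch.allclose(y, torch.where(x >= 0, x, 0.1 * x))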
Scale
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class ScaleNew(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hilman-dayo/ObjectDetection-OneStageDet
Scale
false
15,525
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
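Scale is a per-channel affine map, y = x * weight + bias, with (1, C, 1, 1) parameters broadcasting over the batch and spatial dimensions; that single multiply-add is what triton_poi_fused_add_mul_0 fuses into one pass. A reference check under the default initialization (eager mode, CPU is fine):

import torch

x = torch.rand(4, 4, 4, 4)
w = torch.ones(1, 4, 1, 1)    # weight filled with init_scale=1.0
b = torch.zeros(1, 4, 1, 1)   # bias zeroed by reset_parameters
assert torch.allclose(x * w + b, x)   # identity transform at initialization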
PositionEmbedding2D
import logging import torch import torch.nn as nn def get_root_logger(log_file=None, log_level=logging.INFO): """Use `get_logger` method in mmcv to get the root logger. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmpose". Args: log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger. log_level (int): The root logger level. Note that only the process of rank 0 is affected, while other processes will set the level to "Error" and be silent most of the time. Returns: logging.Logger: The root logger. """ return get_logger(__name__.split('.')[0], log_file, log_level) class PositionEmbedding2D(nn.Module): """2D Postion Embedding layer. """ def __init__(self, max_position_embeddings=128, embedding_dim=128, width_embedding=False, height_embedding=False): """ Args: max_position_embeddings (int): max normalized input dimension (similar to vocab_size). embedding_dim (int): size of embedding vector. width_embedding (bool): whether to include width embedding. height_embedding (bool): whether to include height embedding. """ super().__init__() self.max_position_embeddings = max_position_embeddings self.pos_embedding_dim = embedding_dim self.x_embedding = nn.Embedding(self.max_position_embeddings, self. pos_embedding_dim) self.y_embedding = nn.Embedding(self.max_position_embeddings, self. pos_embedding_dim) self.width_embedding = None if width_embedding: self.width_embedding = nn.Embedding(self. max_position_embeddings, self.pos_embedding_dim) self.height_embedding = None if height_embedding: self.height_embedding = nn.Embedding(self. max_position_embeddings, self.pos_embedding_dim) self.pos_input_proj = nn.Linear(self.pos_embedding_dim, self. pos_embedding_dim) self.pos_input_proj_relu = nn.ReLU() def init_weights(self, pretrained=None): """ Weight initialization Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if isinstance(pretrained, str): logger = get_root_logger() logger.info('Position Embedding:') load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: return else: raise TypeError('pretrained must be a str or None') @property def with_width_embedding(self): """ Returns: Determine the model with the width_embedding or not """ return hasattr(self, 'width_embedding' ) and self.width_embedding is not None @property def with_height_embedding(self): """ Returns: Determine the model with the height_embedding or not """ return hasattr(self, 'height_embedding' ) and self.height_embedding is not None def forward(self, gt_bboxes): """ Forward computation Args: gt_bboxes (Tensor): bboxes Tensor, in shape of [B x N x 4] Returns: Tensor: bboxes/ layout embeddings, in shape of [B x N x C] """ gt_bboxes = torch.clamp(gt_bboxes * self.max_position_embeddings, 0, self.max_position_embeddings - 1).long() left_position_embeddings = self.x_embedding(gt_bboxes[:, :, 0]) upper_position_embeddings = self.y_embedding(gt_bboxes[:, :, 1]) right_position_embeddings = self.x_embedding(gt_bboxes[:, :, 2]) lower_position_embeddings = self.y_embedding(gt_bboxes[:, :, 3]) sum_position_embedding = (left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings) if self.with_width_embedding: sum_position_embedding += self.width_embedding(gt_bboxes[:, :, 2] - gt_bboxes[:, :, 0]) if self.with_height_embedding: sum_position_embedding += self.height_embedding(gt_bboxes[:, :, 3] - gt_bboxes[:, :, 1]) sum_position_embedding = self.pos_input_proj_relu(self. pos_input_proj(sum_position_embedding)) return sum_position_embedding def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import logging import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 128.0 tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 127.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp6.to(tl.int64) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 128 % 4 x2 = xindex // 512 x0 = xindex % 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), None, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), None, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), None, eviction_policy= 'evict_last') tmp1 = tl.full([XBLOCK], 128, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 128), 'index out of bounds: 0 <= tmp4 < 128') tmp6 = tl.load(in_ptr1 + (x0 + 128 * tmp4), None) tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 128), 'index out of bounds: 0 <= tmp10 < 128') tmp12 = tl.load(in_ptr2 + (x0 + 128 * tmp10), None) tmp13 = tmp6 + tmp12 tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tl.device_assert((0 <= tmp17) & (tmp17 < 128), 'index out of bounds: 0 <= tmp17 < 128') tmp19 = tl.load(in_ptr1 + (x0 + 128 * tmp17), None) tmp20 = tmp13 + tmp19 tmp22 = tmp21 + tmp1 tmp23 = tmp21 < 0 tmp24 = tl.where(tmp23, tmp22, tmp21) tl.device_assert((0 <= tmp24) & (tmp24 < 128), 'index out of bounds: 0 <= tmp24 < 128') tmp26 = tl.load(in_ptr2 + (x0 + 128 * tmp24), None) tmp27 = tmp20 + tmp26 tl.store(out_ptr0 + x4, tmp27, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (128, 128), (128, 1)) assert_size_stride(primals_3, (128, 128), (128, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_clamp_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) triton_poi_fused_add_embedding_1[grid(8192)](buf0, primals_2, primals_3, buf1, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(8192)](buf3, primals_5, buf4, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(buf0, (4, 4, 4), (64, 16, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (64, 16, 1), 4 ), reinterpret_tensor(buf0, (4, 4, 4), (64, 16, 1), 8 ), reinterpret_tensor(buf0, (4, 4, 4), (64, 16, 1), 12 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_4 def get_root_logger(log_file=None, log_level=logging.INFO): """Use `get_logger` method in mmcv to get the root logger. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmpose". Args: log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger. log_level (int): The root logger level. Note that only the process of rank 0 is affected, while other processes will set the level to "Error" and be silent most of the time. Returns: logging.Logger: The root logger. """ return get_logger(__name__.split('.')[0], log_file, log_level) class PositionEmbedding2DNew(nn.Module): """2D Postion Embedding layer. """ def __init__(self, max_position_embeddings=128, embedding_dim=128, width_embedding=False, height_embedding=False): """ Args: max_position_embeddings (int): max normalized input dimension (similar to vocab_size). embedding_dim (int): size of embedding vector. width_embedding (bool): whether to include width embedding. height_embedding (bool): whether to include height embedding. """ super().__init__() self.max_position_embeddings = max_position_embeddings self.pos_embedding_dim = embedding_dim self.x_embedding = nn.Embedding(self.max_position_embeddings, self. pos_embedding_dim) self.y_embedding = nn.Embedding(self.max_position_embeddings, self. pos_embedding_dim) self.width_embedding = None if width_embedding: self.width_embedding = nn.Embedding(self. max_position_embeddings, self.pos_embedding_dim) self.height_embedding = None if height_embedding: self.height_embedding = nn.Embedding(self. max_position_embeddings, self.pos_embedding_dim) self.pos_input_proj = nn.Linear(self.pos_embedding_dim, self. pos_embedding_dim) self.pos_input_proj_relu = nn.ReLU() def init_weights(self, pretrained=None): """ Weight initialization Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if isinstance(pretrained, str): logger = get_root_logger() logger.info('Position Embedding:') load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: return else: raise TypeError('pretrained must be a str or None') @property def with_width_embedding(self): """ Returns: Determine the model with the width_embedding or not """ return hasattr(self, 'width_embedding' ) and self.width_embedding is not None @property def with_height_embedding(self): """ Returns: Determine the model with the height_embedding or not """ return hasattr(self, 'height_embedding' ) and self.height_embedding is not None def forward(self, input_0): primals_2 = self.x_embedding.weight primals_3 = self.y_embedding.weight primals_4 = self.pos_input_proj.weight primals_5 = self.pos_input_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
hikopensource/DAVAR-Lab-OCR
PositionEmbedding2D
false
15,526
[ "Apache-2.0" ]
387
c65285f6668864cca7a12770ae4c8d083ea1cf1b
https://github.com/hikopensource/DAVAR-Lab-OCR/tree/c65285f6668864cca7a12770ae4c8d083ea1cf1b
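The only non-obvious step in the forward above is the quantization that triton_poi_fused__to_copy_clamp_mul_0 implements: normalized box coordinates are scaled by max_position_embeddings and clamped into [0, max_position_embeddings - 1] before the four embedding lookups. (Note that get_logger and load_checkpoint come from mmcv and are not imported in this extracted snippet, so init_weights would fail as written.) A small worked example with illustrative values:

import torch

max_pos = 128
boxes = torch.tensor([[[0.0, 0.25, 0.5, 1.0]]])  # (B, N, 4) normalized x1, y1, x2, y2
idx = torch.clamp(boxes * max_pos, 0, max_pos - 1).long()
print(idx)  # tensor([[[  0,  32,  64, 127]]]) -- 1.0 saturates at index 127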
BertAttention
from _paritybench_helpers import _mock_config import math import torch from typing import * from torch import nn import torch.utils.checkpoint class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self. max_position_embeddings - 1) positional_embedding = positional_embedding if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self. num_attention_heads, self.self.attention_head_size, self. pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len( heads) self.self.all_head_size = (self.self.attention_head_size * self. self.num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, position_embedding_type=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from typing import * from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, 
tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_9 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3, buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_11 return buf14, primals_3, primals_10, 
buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8 class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self. max_position_embeddings - 1) positional_embedding = positional_embedding if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self. num_attention_heads, self.self.attention_head_size, self. pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len( heads) self.self.all_head_size = (self.self.attention_head_size * self. self.num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input_0): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_8 = self.output.dense.weight primals_9 = self.output.dense.bias primals_10 = self.output.LayerNorm.weight primals_11 = self.output.LayerNorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
hiaoxui/soft-prompts
BertAttention
false
15,527
[ "Apache-2.0" ]
48
214dbedf735fe1c98ab2be3a26066d50ff0a86d8
https://github.com/hiaoxui/soft-prompts/tree/214dbedf735fe1c98ab2be3a26066d50ff0a86d8
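For reference, the chain of mm/bmm calls plus the two fused softmax kernels in the compiled call() above amounts to ordinary scaled dot-product attention per head, followed by the BertSelfOutput projection and layer norm. (As in the previous record, the extraction drops imports: find_pruneable_heads_and_indices and prune_linear_layer are transformers helpers referenced but not imported here.) A compact eager-mode sketch of the attention core, with shapes following get_inputs (batch 4, seq 4, hidden 4, 4 heads, so head size 1):

import math
import torch

B, L, H, nh = 4, 4, 4, 4
hd = H // nh
q, k, v = (torch.rand(B, nh, L, hd) for _ in range(3))
scores = q @ k.transpose(-1, -2) / math.sqrt(hd)
probs = scores.softmax(dim=-1)                  # the role of the fused softmax kernels
ctx = (probs @ v).permute(0, 2, 1, 3).reshape(B, L, H)
print(ctx.shape)  # torch.Size([4, 4, 4])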
SigmoidFocalClassificationLoss
import torch import torch.nn as nn def _sigmoid_cross_entropy_with_logits(logits, labels): loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits) loss += torch.log1p(torch.exp(-torch.abs(logits))) return loss class SigmoidFocalClassificationLoss(nn.Module): """Sigmoid focal cross entropy loss. Focal loss down-weights well classified examples and focusses on the hard examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. """ def __init__(self, gamma=2.0, alpha=0.25): """Constructor. Args: gamma: exponent of the modulating factor (1 - p_t) ^ gamma. alpha: optional alpha weighting factor to balance positives vs negatives. all_zero_negative: bool. if True, will treat all zero as background. else, will treat first label as background. only affect alpha. """ super().__init__() self._alpha = alpha self._gamma = gamma def forward(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape [batch_size, num_anchors] class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ per_entry_cross_ent = _sigmoid_cross_entropy_with_logits(labels= target_tensor, logits=prediction_tensor) prediction_probabilities = torch.sigmoid(prediction_tensor) p_t = target_tensor * prediction_probabilities + (1 - target_tensor ) * (1 - prediction_probabilities) modulating_factor = 1.0 if self._gamma: modulating_factor = torch.pow(1.0 - p_t, self._gamma) alpha_weight_factor = 1.0 if self._alpha is not None: alpha_weight_factor = target_tensor * self._alpha + (1 - target_tensor) * (1 - self._alpha) focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * per_entry_cross_ent) return focal_cross_entropy_loss * weights def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0( in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp27 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = tmp4 - tmp2 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp9 = tmp4 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 0.25 tmp12 = tmp0 * tmp11 tmp13 = 0.75 tmp14 = tmp5 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp10 * tmp15 tmp17 = 0.0 tmp18 = triton_helpers.maximum(tmp1, tmp17) tmp19 = tmp1 * tmp0 tmp20 = tmp18 - tmp19 tmp21 = tl_math.abs(tmp1) tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = libdevice.log1p(tmp23) tmp25 = tmp20 + tmp24 tmp26 = tmp16 * tmp25 tmp28 = tmp26 * tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0[ grid(256)](arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, def _sigmoid_cross_entropy_with_logits(logits, labels): loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits) loss += torch.log1p(torch.exp(-torch.abs(logits))) return loss class SigmoidFocalClassificationLossNew(nn.Module): """Sigmoid focal cross entropy loss. Focal loss down-weights well classified examples and focusses on the hard examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. """ def __init__(self, gamma=2.0, alpha=0.25): """Constructor. Args: gamma: exponent of the modulating factor (1 - p_t) ^ gamma. alpha: optional alpha weighting factor to balance positives vs negatives. all_zero_negative: bool. if True, will treat all zero as background. else, will treat first label as background. only affect alpha. """ super().__init__() self._alpha = alpha self._gamma = gamma def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
hlesmqh/WS3D
SigmoidFocalClassificationLoss
false
15,528
[ "MIT" ]
100
6816eeb135923a59de34ee5d94be2d0fd3ec83f9
https://github.com/hlesmqh/WS3D/tree/6816eeb135923a59de34ee5d94be2d0fd3ec83f9
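The helper _sigmoid_cross_entropy_with_logits above is the standard numerically stable form of binary cross-entropy with logits, clamp(x, 0) - x*y + log1p(exp(-|x|)); the fused kernel inlines exactly this before applying the focal modulating factor and the alpha weights. A quick equivalence check against PyTorch's built-in:

import torch
import torch.nn.functional as F

logits = torch.randn(16)
labels = torch.randint(0, 2, (16,)).float()
manual = (torch.clamp(logits, min=0) - logits * labels
          + torch.log1p(torch.exp(-torch.abs(logits))))
builtin = F.binary_cross_entropy_with_logits(logits, labels, reduction='none')
assert torch.allclose(manual, builtin, atol=1e-6)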
MarginRankingLoss
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from math import sqrt as sqrt from itertools import product as product class MarginRankingLoss(nn.Module): def __init__(self, margin=1.0): super(MarginRankingLoss, self).__init__() self.margin = margin def forward(self, inputs, targets): random = torch.randperm(inputs.size(0)) inputs[random] pred_lossi = inputs[:inputs.size(0) // 2] pred_lossj = inputs[inputs.size(0) // 2:] target_loss = targets.reshape(inputs.size(0), 1) target_loss = target_loss[random] target_lossi = target_loss[:inputs.size(0) // 2] target_lossj = target_loss[inputs.size(0) // 2:] final_target = torch.sign(target_lossi - target_lossj) return F.margin_ranking_loss(pred_lossi, pred_lossj, final_target, margin=self.margin, reduction='mean') def get_inputs(): return [torch.rand([4, 1]), torch.rand([4, 1])] def get_init_inputs(): return [[], {}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from math import sqrt as sqrt from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr0 + (2 + r0), None) tmp22 = tl.load(in_ptr2 + r0, None) tmp23 = tl.load(in_ptr2 + (2 + r0), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + tmp4, None, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4), 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr1 + tmp10, None, eviction_policy='evict_last') tmp13 = tmp6 - tmp12 tmp14 = tl.full([1, 1], 0, tl.int32) tmp15 = tmp14 < tmp13 tmp16 = tmp15.to(tl.int8) tmp17 = tmp13 < tmp14 tmp18 = tmp17.to(tl.int8) tmp19 = tmp16 - tmp18 tmp20 = tmp19.to(tmp13.dtype) tmp21 = -tmp20 tmp24 = tmp22 - tmp23 tmp25 = tmp21 * tmp24 tmp26 = 1.0 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = 2.0 tmp34 = tmp32 / tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 1), (1, 1)) assert_size_stride(arg1_1, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randperm.default(4, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0[grid(1)](buf3, buf1, arg1_1, arg0_1, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 return buf3, class MarginRankingLossNew(nn.Module): def __init__(self, margin=1.0): super(MarginRankingLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hilman-dayo/active_learning
MarginRankingLoss
false
15,529
[ "Apache-2.0" ]
54
cc5b0388be25946e794d59d95e4d9c8c56e24207
https://github.com/hilman-dayo/active_learning/tree/cc5b0388be25946e794d59d95e4d9c8c56e24207
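Two things are worth noting about this record. First, the bare `inputs[random]` statement in the original forward discards its result, so only the targets are shuffled; the compiled kernel reproduces that faithfully (it gathers in_ptr1, the targets, through the randperm indices while reading the predictions in order). Second, the loss itself is the standard hinge form mean(max(0, -y*(x1 - x2) + margin)), which the following sketch reproduces:

import torch
import torch.nn.functional as F

pi, pj = torch.rand(2, 1), torch.rand(2, 1)          # the two prediction halves
y = torch.sign(torch.rand(2, 1) - torch.rand(2, 1))  # sign of the target gap
manual = torch.clamp(-y * (pi - pj) + 1.0, min=0).mean()
assert torch.allclose(manual, F.margin_ranking_loss(pi, pj, y, margin=1.0))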
RollRev
import torch from torch import nn def roll(x, step, axis): shape = x.shape for i, s in enumerate(step): if s >= 0: x1 = x.narrow(axis[i], 0, s) x2 = x.narrow(axis[i], s, shape[axis[i]] - s) else: x2 = x.narrow(axis[i], shape[axis[i]] + s, -s) x1 = x.narrow(axis[i], 0, shape[axis[i]] + s) x = torch.cat([x2, x1], axis[i]) return x class RollRev(nn.Module): def __init__(self, step, axis): super(RollRev, self).__init__() if not isinstance(step, list): assert not isinstance(axis, list) step = [step] axis = [axis] assert len(step) == len(axis) self.step = step self.axis = axis def forward(self, x): return roll(x, self.step, self.axis) def reverse(self, x): return roll(x, [(-i) for i in self.step], self.axis) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'step': 4, 'axis': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def roll(x, step, axis): shape = x.shape for i, s in enumerate(step): if s >= 0: x1 = x.narrow(axis[i], 0, s) x2 = x.narrow(axis[i], s, shape[axis[i]] - s) else: x2 = x.narrow(axis[i], shape[axis[i]] + s, -s) x1 = x.narrow(axis[i], 0, shape[axis[i]] + s) x = torch.cat([x2, x1], axis[i]) return x class RollRevNew(nn.Module): def __init__(self, step, axis): super(RollRevNew, self).__init__() if not isinstance(step, list): assert not isinstance(axis, list) step = [step] axis = [axis] assert len(step) == len(axis) self.step = step self.axis = axis def reverse(self, x): return roll(x, [(-i) for i in self.step], self.axis) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hongyehu/NeuralRG
RollRev
false
15,530
[ "Apache-2.0" ]
65
ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8
https://github.com/hongyehu/NeuralRG/tree/ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8
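The hand-written roll above is equivalent to torch.roll with a negated shift: concatenating x.narrow(axis, s, n - s) before x.narrow(axis, 0, s) rotates the axis left by s. With the get_init_inputs shown (step 4 on an axis of length 4) the roll is a full rotation, i.e. the identity, which is why triton_poi_fused_cat_0 degenerates to a plain element-wise copy. A reference check for a non-trivial shift:

import torch

x = torch.arange(8).view(2, 4)
s, axis, n = 1, 1, 4
manual = torch.cat([x.narrow(axis, s, n - s), x.narrow(axis, 0, s)], axis)
assert torch.equal(manual, torch.roll(x, shifts=-s, dims=axis))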
Reorg
import torch import torch.nn as nn class Reorg(nn.Module): """ This layer reorganizes a tensor according to a stride. The dimensions 2,3 will be sliced by the stride and then stacked in dimension 1. (input must have 4 dimensions) Args: stride (int): stride to divide the input tensor """ def __init__(self, stride=2): super(Reorg, self).__init__() if not isinstance(stride, int): raise TypeError(f'stride is not an int [{type(stride)}]') self.stride = stride self.darknet = True def __repr__(self): return ( f'{self.__class__.__name__} (stride={self.stride}, darknet_compatible_mode={self.darknet})' ) def forward(self, x): assert x.data.dim() == 4 B = x.data.size(0) C = x.data.size(1) H = x.data.size(2) W = x.data.size(3) if H % self.stride != 0: raise ValueError( f'Dimension mismatch: {H} is not divisible by {self.stride}') if W % self.stride != 0: raise ValueError( f'Dimension mismatch: {W} is not divisible by {self.stride}') if self.darknet: x = x.view(B, C // self.stride ** 2, H, self.stride, W, self.stride ).contiguous() x = x.permute(0, 3, 5, 1, 2, 4).contiguous() x = x.view(B, -1, H // self.stride, W // self.stride) else: ws, hs = self.stride, self.stride x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(3, 4 ).contiguous() x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(2, 3 ).contiguous() x = x.view(B, C, hs * ws, H // hs, W // ws).transpose(1, 2 ).contiguous() x = x.view(B, hs * ws * C, H // hs, W // ws) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex % 4 x4 = xindex // 4 y0 = yindex % 2 y1 = yindex // 2 % 2 y2 = yindex // 4 x6 = xindex y5 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x3 + 8 * y1 + 16 * x4 + 64 * y2), xmask & ymask) tl.store(out_ptr0 + (x6 + 16 * y5), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 2, 1, 4, 4), (64, 32, 16, 16, 4, 1 ), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), class ReorgNew(nn.Module): """ This layer reorganizes a tensor according to a stride. The dimensions 2,3 will be sliced by the stride and then stacked in dimension 1. (input must have 4 dimensions) Args: stride (int): stride to divide the input tensor """ def __init__(self, stride=2): super(ReorgNew, self).__init__() if not isinstance(stride, int): raise TypeError(f'stride is not an int [{type(stride)}]') self.stride = stride self.darknet = True def __repr__(self): return ( f'{self.__class__.__name__} (stride={self.stride}, darknet_compatible_mode={self.darknet})' ) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hilman-dayo/ObjectDetection-OneStageDet
Reorg
false
15,531
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
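Shape-wise, the darknet-compatible branch above maps (B, C, H, W) to (B, C * stride**2, H // stride, W // stride), and the compiled kernel realizes the whole view/permute chain as a single gather. A sketch mirroring the eager-mode index shuffle for the get_inputs shapes:

import torch

x = torch.rand(4, 4, 4, 4)
B, C, H, W = x.shape
s = 2
y = x.view(B, C // s ** 2, H, s, W, s).permute(0, 3, 5, 1, 2, 4).contiguous()
y = y.view(B, -1, H // s, W // s)
print(y.shape)  # torch.Size([4, 16, 2, 2])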
DiceLoss
import torch import torch.nn as nn class DiceLoss(nn.Module): def __init__(self, eps=1e-06): super().__init__() assert isinstance(eps, float) self.eps = eps def forward(self, pred, target, mask=None): pred = pred.contiguous().view(pred.size()[0], -1) target = target.contiguous().view(target.size()[0], -1) if mask is not None: mask = mask.contiguous().view(mask.size()[0], -1) pred = pred * mask target = target * mask a = torch.sum(pred * target) b = torch.sum(pred) c = torch.sum(target) d = 2 * a / (b + c + self.eps) return 1 - d def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = tmp8 + tmp11 tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = tmp13 / tmp16 tmp18 = 1.0 tmp19 = tmp18 - tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class DiceLossNew(nn.Module): def __init__(self, eps=1e-06): super().__init__() assert isinstance(eps, float) self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hongxuenong/mmocr
DiceLoss
false
15,532
[ "Apache-2.0" ]
2,261
e8e3a059f8f2e4fca96af37751c33563fc48e2ba
https://github.com/hongxuenong/mmocr/tree/e8e3a059f8f2e4fca96af37751c33563fc48e2ba
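Example: a small worked check of the dice formula d = 2a / (b + c + eps) using the eager DiceLoss defined in this row (the compiled call() path needs a CUDA device; this runs on CPU).

import torch

loss_fn = DiceLoss()
t = (torch.rand(4, 4, 4, 4) > 0.5).float()
print(loss_fn(t, t).item())        # ~0.0: for a binary mask, a = b = c, so d = 2a / (2a + eps) ~ 1
print(loss_fn(t, 1.0 - t).item())  # ~1.0: disjoint binary masks give a = 0, so d = 0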
MultiHeadAttn
import torch import torch.cuda from torch.nn import functional as F from torch import nn import torch.distributed import torch.utils.data import torch.optim class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) def forward(self, inp, attn_mask=None): return self._forward(inp, attn_mask) def _forward(self, inp, attn_mask=None): residual = inp if self.pre_lnorm: inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1) attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1) attn_score.masked_fill_(attn_mask, -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size( 0), inp.size(1), n_head * d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = residual + attn_out else: output = self.layer_norm(residual + attn_out) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.cuda from torch.nn import functional as F from torch import nn import torch.distributed import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (32 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask) tmp1 = tl.load(in_ptr1 + (32 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask) tmp1 = tl.load(in_ptr1 + (16 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 
+ (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (48, 4), (4, 1)) assert_size_stride(primals_3, (48,), (1,)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 48), (48, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 48), (1, 4), 0), out=buf0) del primals_2 buf1 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](buf0, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf0, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf11 del primals_6 return buf12, primals_1, primals_5, buf6, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0) class MultiHeadAttnNew(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttnNew, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) def _forward(self, inp, attn_mask=None): residual = inp if self.pre_lnorm: inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1) attn_mask = attn_mask.repeat(n_head, 
attn_mask.size(2), 1) attn_score.masked_fill_(attn_mask, -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size( 0), inp.size(1), n_head * d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = residual + attn_out else: output = self.layer_norm(residual + attn_out) return output def forward(self, input_0): primals_2 = self.qkv_net.weight primals_3 = self.qkv_net.bias primals_4 = self.o_net.weight primals_5 = self.layer_norm.weight primals_6 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
hamjam/NeMo
MultiHeadAttn
false
15,533
[ "Apache-2.0" ]
4,145
b3484d32e1317666151f931bfa39867d88ed8658
https://github.com/hamjam/NeMo/tree/b3484d32e1317666151f931bfa39867d88ed8658
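Example: a CPU smoke test of the eager MultiHeadAttn in this row; eval() disables both dropout layers so the output is deterministic.

import torch

attn = MultiHeadAttn(n_head=4, d_model=4, d_head=4, dropout=0.5).eval()
x = torch.rand(4, 4, 4)  # (batch, seq_len, d_model)
print(attn(x).shape)     # torch.Size([4, 4, 4]): layer-normed residual output, same shape as the input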
PLU
import torch import torch.nn as nn class PLU(nn.Module): """ y = max(alpha*(x+c)−c, min(alpha*(x−c)+c, x)) from PLU: The Piecewise Linear Unit Activation Function """ def __init__(self, alpha=0.1, c=1): super().__init__() self.alpha = alpha self.c = c def forward(self, x): x1 = self.alpha * (x + self.c) - self.c x2 = self.alpha * (x - self.c) + self.c min1 = torch.min(x2, x) min2 = torch.max(x1, min1) return min2 def __repr__(self): s = '{name} ({alpha}, {c})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_maximum_minimum_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 - tmp1 tmp6 = tmp0 - tmp1 tmp7 = tmp6 * tmp3 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.minimum(tmp8, tmp0) tmp10 = triton_helpers.maximum(tmp5, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_maximum_minimum_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PLUNew(nn.Module): """ y = max(alpha*(x+c)−c, min(alpha*(x−c)+c, x)) from PLU: The Piecewise Linear Unit Activation Function """ def __init__(self, alpha=0.1, c=1): super().__init__() self.alpha = alpha self.c = c def __repr__(self): s = '{name} ({alpha}, {c})' return s.format(name=self.__class__.__name__, **self.__dict__) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hilman-dayo/ObjectDetection-OneStageDet
PLU
false
15,534
[ "MIT" ]
331
44054ad335e24e99a98fdad0d18b9bf3a80c941c
https://github.com/hilman-dayo/ObjectDetection-OneStageDet/tree/44054ad335e24e99a98fdad0d18b9bf3a80c941c
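Example: a tiny worked check of the PLU formula above; the unit is the identity inside [-c, c] and follows the two alpha-sloped lines outside.

import torch

plu = PLU(alpha=0.1, c=1)
x = torch.tensor([-3.0, -0.5, 0.5, 3.0])
print(plu(x))  # tensor([-1.2000, -0.5000,  0.5000,  1.2000])
# e.g. at x = 3: x1 = 0.1 * (3 + 1) - 1 = -0.6, x2 = 0.1 * (3 - 1) + 1 = 1.2, max(x1, min(x2, x)) = 1.2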
GraphConv
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init class MeanAggregator(nn.Module): def forward(self, features, A): x = torch.bmm(A, features) return x class GraphConv(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim)) self.bias = nn.Parameter(torch.FloatTensor(out_dim)) init.xavier_uniform_(self.weight) init.constant_(self.bias, 0) self.aggregator = MeanAggregator() def forward(self, features, A): _b, _n, d = features.shape assert d == self.in_dim agg_feats = self.aggregator(features, A) cat_feats = torch.cat([features, agg_feats], dim=2) out = torch.einsum('bnd,df->bnf', cat_feats, self.weight) out = F.relu(out + self.bias) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (8, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_2, primals_1, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = reinterpret_tensor(buf0, (1, 16, 4), (64, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 8), (0, 8, 1), 0), reinterpret_tensor(primals_3, (1, 8, 4), (32, 4, 1), 0), out=buf2) del primals_3 buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf3, primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 return buf3, buf4, reinterpret_tensor(buf1, (1, 8, 16), (128, 1, 8), 0) class MeanAggregator(nn.Module): def forward(self, features, A): x = torch.bmm(A, features) return x class GraphConvNew(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim)) self.bias = nn.Parameter(torch.FloatTensor(out_dim)) init.xavier_uniform_(self.weight) init.constant_(self.bias, 0) self.aggregator = MeanAggregator() def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_1 = input_0 primals_2 = 
input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
hongxuenong/mmocr
GraphConv
false
15,535
[ "Apache-2.0" ]
2,261
e8e3a059f8f2e4fca96af37751c33563fc48e2ba
https://github.com/hongxuenong/mmocr/tree/e8e3a059f8f2e4fca96af37751c33563fc48e2ba
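Example: a minimal CPU sketch exercising the eager GraphConv in this row. The row-normalised adjacency is an assumption made here so that bmm(A, features) really is the weighted mean that the MeanAggregator name implies.

import torch

gc = GraphConv(in_dim=4, out_dim=4)
feats = torch.rand(4, 4, 4)                     # (batch, num_nodes, in_dim)
A = torch.softmax(torch.rand(4, 4, 4), dim=-1)  # rows sum to 1
out = gc(feats, A)                              # concat(self, aggregated) -> linear -> relu
print(out.shape)  # torch.Size([4, 4, 4])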
AppendLayer
import torch import numpy as np import torch.nn as nn class AppendLayer(nn.Module): def __init__(self, noise=0.001, *args, **kwargs): super().__init__(*args, **kwargs) self.log_var = nn.Parameter(torch.DoubleTensor(1, 1)) nn.init.constant_(self.log_var, val=np.log(noise)) def forward(self, x): return torch.cat((x, self.log_var * torch.ones_like(x)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp12 = tl.load(in_ptr1 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp5.to(tl.float64) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.full([1], 1.0, tl.float64) tmp15 = tmp13 * tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp8, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 1), (1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float64) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_2, primals_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class AppendLayerNew(nn.Module): def __init__(self, noise=0.001, *args, **kwargs): super().__init__(*args, **kwargs) self.log_var = nn.Parameter(torch.DoubleTensor(1, 1)) nn.init.constant_(self.log_var, val=np.log(noise)) def forward(self, input_0): primals_1 = self.log_var primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
hssandriss/pybnn
AppendLayer
false
15,536
[ "BSD-3-Clause" ]
110
e878553a24ce9ebdde9088f285c7f292e4ee8885
https://github.com/hssandriss/pybnn/tree/e878553a24ce9ebdde9088f285c7f292e4ee8885
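Example: a short check of the eager AppendLayer in this row. It doubles the channel dim, with the new half holding the learned log-variance; the double-precision parameter promotes the concatenated output to float64 (hence the torch.float64 buffer in call()).

import math
import torch

layer = AppendLayer(noise=0.001)
out = layer(torch.rand(4, 4, 4, 4))
print(out.shape, out.dtype)                     # torch.Size([4, 8, 4, 4]) torch.float64
print(out[0, 4, 0, 0].item(), math.log(0.001))  # both ~ -6.9078: channels 4..7 are the log-variance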
ConvolutionLayer
import torch import torch.nn as nn class ConvolutionLayer(nn.Module): def __init__(self, channels, filters, kernel_size, stride=1, dilation=1): super(ConvolutionLayer, self).__init__() padding = kernel_size // 2 padding += padding * (dilation - 1) self.conv = nn.Conv1d(channels, filters, kernel_size, stride=stride, dilation=dilation, padding=padding) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'filters': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(20)](buf1, primals_2, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 5), (5, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0) class ConvolutionLayerNew(nn.Module): def __init__(self, channels, filters, kernel_size, stride=1, dilation=1): super(ConvolutionLayerNew, self).__init__() padding = kernel_size // 2 padding += padding * (dilation - 1) self.conv = nn.Conv1d(channels, filters, kernel_size, stride=stride, dilation=dilation, padding=padding) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
huak95/attacut
ConvolutionLayer
false
15,537
[ "MIT" ]
54
100333931023cd009daeddec0cba4cdfce3d0b68
https://github.com/huak95/attacut/tree/100333931023cd009daeddec0cba4cdfce3d0b68
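Example: the padding arithmetic in ConvolutionLayer reduces to (kernel_size // 2) * dilation, which gives 'same' padding at stride 1 for odd kernels; even kernels, as in this row's k=4 inputs, come out one element longer. A CPU sketch:

import torch

layer = ConvolutionLayer(channels=4, filters=4, kernel_size=4)  # padding = (4 // 2) * 1 = 2
x = torch.rand(4, 4, 16)  # (batch, channels, length)
# L_out = (L + 2*p - d*(k - 1) - 1) // stride + 1 = (16 + 4 - 3 - 1) // 1 + 1 = 17
print(layer(x).shape)  # torch.Size([4, 4, 17])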
rbbox_corners_aligned
import torch import torch.nn as nn class rbbox_corners_aligned(nn.Module): def __init__(self, gboxes=None): super(rbbox_corners_aligned, self).__init__() self.corners_gboxes = gboxes return def forward(self, gboxes): N = gboxes.shape[0] center_x = gboxes[:, 0] center_y = gboxes[:, 1] x_d = gboxes[:, 2] y_d = gboxes[:, 3] corners = torch.zeros([N, 2, 4], device=gboxes.device, dtype=torch. float32) corners[:, 0, 0] = x_d.mul(-0.5) corners[:, 1, 0] = y_d.mul(-0.5) corners[:, 0, 1] = x_d.mul(-0.5) corners[:, 1, 1] = y_d.mul(0.5) corners[:, 0, 2] = x_d.mul(0.5) corners[:, 1, 2] = y_d.mul(0.5) corners[:, 0, 3] = x_d.mul(0.5) corners[:, 1, 3] = y_d.mul(-0.5) b = center_x.unsqueeze(1).repeat(1, 4).unsqueeze(1) c = center_y.unsqueeze(1).repeat(1, 4).unsqueeze(1) return corners + torch.cat((b, c), 1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_copy_mul_zeros_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 2 x0 = xindex % 4 x2 = xindex // 8 x4 = xindex tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x0 tmp4 = tmp3 == tmp1 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = tmp1 == tmp8 tmp11 = -0.5 tmp12 = tmp10 * tmp11 tmp13 = tmp8 == tmp1 tmp14 = tmp3 == tmp8 tmp15 = tmp5 * tmp11 tmp16 = 0.0 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.where(tmp9, tmp17, tmp16) tmp19 = tl.where(tmp14, tmp15, tmp18) tmp20 = tmp8 == tmp8 tmp21 = tl.where(tmp20, tmp17, tmp16) tmp22 = tl.where(tmp13, tmp19, tmp21) tmp23 = tl.where(tmp4, tmp12, tmp22) tmp24 = tmp1 == tmp1 tmp25 = tl.where(tmp24, tmp19, tmp18) tmp26 = tl.where(tmp9, tmp23, tmp25) tmp27 = tl.where(tmp4, tmp7, tmp26) tmp28 = tmp0 == tmp8 tmp29 = tl.where(tmp28, tmp17, tmp16) tmp30 = tl.where(tmp2, tmp19, tmp29) tmp31 = tl.where(tmp28, tmp23, tmp30) tmp32 = tl.where(tmp2, tmp27, tmp31) tl.store(out_ptr0 + x4, tmp32, xmask) @triton.jit def triton_poi_fused_copy_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 2 x0 = xindex % 4 x2 = xindex // 8 x4 = xindex tmp6 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (4 + x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + x4, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x0 tmp4 = tl.full([1], 2, tl.int32) tmp5 = tmp3 == tmp4 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = tmp1 == tmp9 tmp12 = tmp11 * tmp7 tmp14 = tl.where(tmp5, tmp12, tmp13) tmp16 = tl.where(tmp10, tmp14, tmp15) tmp17 = tl.where(tmp5, tmp8, tmp16) tmp18 = tmp0 == tmp9 tmp20 = tl.where(tmp18, tmp14, tmp19) tmp21 = tl.where(tmp2, tmp17, tmp20) tl.store(out_ptr0 + x4, tmp21, xmask) @triton.jit def triton_poi_fused_add_cat_copy_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 2 x0 = xindex % 4 x2 = xindex // 8 x4 = xindex tmp6 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (4 + x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + x4, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x0 tmp4 = tl.full([1], 3, tl.int32) tmp5 = tmp3 == tmp4 tmp7 = -0.5 tmp8 = tmp6 * 
tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = tmp1 == tmp9 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp15 = tl.where(tmp5, tmp13, tmp14) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp5, tmp8, tmp17) tmp19 = tmp0 == tmp9 tmp21 = tl.where(tmp19, tmp15, tmp20) tmp22 = tl.where(tmp2, tmp18, tmp21) tl.full([1], 0, tl.int64) tmp25 = tl.full([1], 1, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tl.load(in_ptr0 + 4 * x2, tmp26 & xmask, eviction_policy= 'evict_last', other=0.0) tmp28 = tmp0 >= tmp25 tl.full([1], 2, tl.int64) tmp31 = tl.load(in_ptr0 + (1 + 4 * x2), tmp28 & xmask, eviction_policy= 'evict_last', other=0.0) tmp32 = tl.where(tmp26, tmp27, tmp31) tmp33 = tmp22 + tmp32 tl.store(out_ptr0 + x4, tmp33, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_mul_zeros_0[grid(32)](arg0_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_copy_mul_1[grid(32)](arg0_1, buf0, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused_add_cat_copy_mul_2[grid(32)](arg0_1, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del arg0_1 del buf1 return buf2, class rbbox_corners_alignedNew(nn.Module): def __init__(self, gboxes=None): super(rbbox_corners_alignedNew, self).__init__() self.corners_gboxes = gboxes return def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hlesmqh/WS3D
rbbox_corners_aligned
false
15,538
[ "MIT" ]
100
6816eeb135923a59de34ee5d94be2d0fd3ec83f9
https://github.com/hlesmqh/WS3D/tree/6816eeb135923a59de34ee5d94be2d0fd3ec83f9
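Example: a worked case for rbbox_corners_aligned in this row, with one box in (cx, cy, x_d, y_d) form; row 0 of the output holds the four x coordinates, row 1 the four y coordinates.

import torch

m = rbbox_corners_aligned()
boxes = torch.tensor([[1.0, 2.0, 4.0, 2.0]])  # centre (1, 2), width 4, height 2
print(m(boxes))
# tensor([[[-1., -1.,  3.,  3.],
#          [ 1.,  3.,  3.,  1.]]])  -- each corner is the centre plus/minus half the extent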
GCN
from torch.nn import Module import math import torch from torch import nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj, ismlp=False): if len(input.shape) == 3: B = input.shape[0] N = input.shape[1] support = torch.matmul(input, self.weight) if ismlp: return support if self.bias is None else support + self.bias support = support.transpose(0, 1).reshape(N, B * self.out_features) output = torch.spmm(adj, support) output = output.reshape(N, B, self.out_features).transpose(0, 1) else: support = torch.mm(input, self.weight) if ismlp: return support if self.bias is None else support + self.bias output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math from torch import nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4) del primals_6 buf5 = buf3 del buf3 triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj, ismlp=False): if len(input.shape) == 3: B = input.shape[0] N = input.shape[1] support = torch.matmul(input, self.weight) if ismlp: return support if self.bias is None else support + self.bias support = support.transpose(0, 1).reshape(N, B * self.out_features) output = torch.spmm(adj, support) output = output.reshape(N, B, self.out_features).transpose(0, 1) else: support = torch.mm(input, self.weight) if ismlp: return support if self.bias is None else support + self.bias output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class 
GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
hongfz16/Garment4D
GCN
false
15,539
[ "MIT" ]
89
9317dc262f3d35eb9e6cd6a7bfbb29f04560ca35
https://github.com/hongfz16/Garment4D/tree/9317dc262f3d35eb9e6cd6a7bfbb29f04560ca35
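Example: a CPU smoke test for the two-layer GCN in this row. An identity adjacency makes spmm(adj, support) a no-op, reducing each graph convolution to a plain linear layer, and eval() disables dropout.

import torch

model = GCN(nfeat=4, nhid=4, nclass=4, dropout=0.5).eval()
x = torch.rand(4, 4)  # node features
adj = torch.eye(4)    # identity adjacency: no message passing between nodes
out = model(x, adj)
print(out.shape, out.exp().sum(dim=1))  # torch.Size([4, 4]); rows sum to ~1 under F.log_softmax(dim=1)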
Joiner
import torch from torch import nn import torch.nn.functional as F class Joiner(nn.Module): def __init__(self, input_dim: 'int', output_dim: 'int'): super().__init__() self.output_linear = nn.Linear(input_dim, output_dim) def forward(self, encoder_out: 'torch.Tensor', decoder_out: 'torch.Tensor' ) ->torch.Tensor: """ Args: encoder_out: Output from the encoder. Its shape is (N, T, C). decoder_out: Output from the decoder. Its shape is (N, U, C). Returns: Return a tensor of shape (N, T, U, C). """ assert encoder_out.ndim == decoder_out.ndim == 3 assert encoder_out.size(0) == decoder_out.size(0) assert encoder_out.size(2) == decoder_out.size(2) encoder_out = encoder_out.unsqueeze(2) decoder_out = decoder_out.unsqueeze(1) logit = encoder_out + decoder_out logit = F.relu(logit) output = self.output_linear(logit) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 16 x3 = xindex // 64 x5 = xindex % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x6, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_relu_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class JoinerNew(nn.Module): def __init__(self, input_dim: 'int', output_dim: 'int'): super().__init__() self.output_linear = nn.Linear(input_dim, output_dim) def forward(self, input_0, input_1): primals_3 = self.output_linear.weight primals_4 = self.output_linear.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
huangruizhe/icefall
Joiner
false
15,541
[ "Apache-2.0" ]
173
ea8af0ee9af5169d93f8f389ffebbc27a1d9e82a
https://github.com/huangruizhe/icefall/tree/ea8af0ee9af5169d93f8f389ffebbc27a1d9e82a
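Example: the Joiner in this row follows the usual RNN-T joiner pattern, broadcast-adding encoder (N, T, C) and decoder (N, U, C) states over a (T, U) grid before the relu and projection. A sketch with T != U to make the broadcast visible:

import torch

joiner = Joiner(input_dim=4, output_dim=4)
enc = torch.rand(4, 7, 4)  # (N, T, C)
dec = torch.rand(4, 5, 4)  # (N, U, C)
print(joiner(enc, dec).shape)  # torch.Size([4, 7, 5, 4]): one logit vector per (t, u) pair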
Squeezing
import torch from torch import nn class Squeezing(nn.Module): def __init__(self, filterSize=2): super(Squeezing, self).__init__() self.filterSize = filterSize def forward(self, input): scale_factor = self.filterSize batch_size, in_channels, in_height, in_width = input.size() out_channels = int(in_channels // (scale_factor * scale_factor)) out_height = int(in_height * scale_factor) out_width = int(in_width * scale_factor) if scale_factor >= 1: input_view = input.contiguous().view(batch_size, out_channels, scale_factor, scale_factor, in_height, in_width) shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous() else: block_size = int(1 / scale_factor) input_view = input.contiguous().view(batch_size, in_channels, out_height, block_size, out_width, block_size) shuffle_out = input_view.permute(0, 1, 3, 5, 2, 4).contiguous() return shuffle_out.reshape(batch_size, out_channels, out_height, out_width) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = xindex y0 = yindex % 4 y1 = yindex // 4 % 2 y2 = yindex // 8 % 4 y3 = yindex // 32 y5 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * y2 + 16 * x4 + 32 * y1 + 64 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4 + 2 * y5), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 2, 4, 2), (64, 64, 16, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(128, 2)](arg0_1, buf0, 128, 2, XBLOCK =2, YBLOCK=64, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 8, 8), (64, 64, 8, 1), 0), class SqueezingNew(nn.Module): def __init__(self, filterSize=2): super(SqueezingNew, self).__init__() self.filterSize = filterSize def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hongyehu/NeuralRG
Squeezing
false
15,542
[ "Apache-2.0" ]
65
ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8
https://github.com/hongyehu/NeuralRG/tree/ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8
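Example: a quick check of the (fixed) eager Squeezing in this row at filterSize=2: a depth-to-space shuffle that divides channels by s*s and scales each spatial dim by s.

import torch

sq = Squeezing(filterSize=2)
x = torch.rand(4, 4, 4, 4)
print(sq(x).shape)  # torch.Size([4, 1, 8, 8]), matching the (4, 1, 8, 8) view returned by call()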
Warp
import torch from torch import Tensor import torch.nn as nn import torch.nn.functional as F def coords_grid(flow: 'Tensor') ->Tensor: """Generate shifted coordinate grid based on input flow. Args: flow (Tensor): Estimated optical flow. Returns: Tensor: The coordinate that shifted by input flow and scale in the range [-1, 1]. """ B, _, H, W = flow.shape xx = torch.arange(0, W, device=flow.device, requires_grad=False) yy = torch.arange(0, H, device=flow.device, requires_grad=False) coords = torch.meshgrid(yy, xx) coords = torch.stack(coords[::-1], dim=0).float() grid = coords[None].repeat(B, 1, 1, 1) + flow grid[:, 0, ...] = grid[:, 0, ...] * 2.0 / max(W - 1, 1) - 1.0 grid[:, 1, ...] = grid[:, 1, ...] * 2.0 / max(H - 1, 1) - 1.0 grid = grid.permute(0, 2, 3, 1) return grid class Warp(nn.Module): """Warping layer to warp feature using optical flow. Args: mode (str): interpolation mode to calculate output values. Options are 'bilinear' and 'nearest'. Defaults to 'bilinear'. padding_mode (str): padding mode for outside grid values. Options are 'zeros', 'border' and 'reflection'. Defaults to 'zeros'. align_corners (bool): If set to True, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If set to False, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Defaults to False. """ def __init__(self, mode: 'str'='bilinear', padding_mode: 'str'='zeros', align_corners: 'bool'=False, use_mask: 'bool'=True) ->None: super().__init__() self.mode = mode self.padding_mode = padding_mode self.align_corners = align_corners self.use_mask = use_mask def forward(self, feat: 'Tensor', flow: 'Tensor') ->Tensor: """Forward function for warp. Args: feat (Tensor): Input feature flow (Tensor): Input optical flow. Returns: Tensor: The output feature that was generated by warping input feature based on input flow. """ grid = coords_grid(flow) out = F.grid_sample(feat, grid, mode=self.mode, padding_mode=self. padding_mode, align_corners=self.align_corners) mask = torch.ones(feat.size(), device=feat.device, requires_grad=False) if self.use_mask: mask = F.grid_sample(mask, grid, mode=self.mode, padding_mode= self.padding_mode, align_corners=self.align_corners) mask = (mask > 0.9999).float() return out * mask def __repr__(self): s = self.__class__.__name__ s += f'(mode={self.mode}, ' s += f'padding_mode={self.padding_mode}, ' s += f'align_corners={self.align_corners},' s += f'use_mask={self.use_mask})' return s def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_copy_div_mul_repeat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 2 x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex // 32 x5 = xindex % 16 x6 = xindex // 4 % 8 x7 = xindex tmp19 = tl.load(in_ptr0 + (x5 + 32 * x3), xmask, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr0 + x7, xmask) tmp0 = x2 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x1 tl.full([1], 0, tl.int64) tmp6 = tl.full([1], 4, tl.int64) tmp7 = tmp3 < tmp6 tmp8 = x0 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 >= tmp6 tl.full([1], 8, tl.int64) tmp14 = -4 + x1 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp11, tmp14, tmp15) tmp17 = tl.where(tmp7, tmp10, tmp16) tmp18 = tmp17.to(tl.float32) tmp20 = tmp18 + tmp19 tmp21 = 2.0 tmp22 = tmp20 * tmp21 tmp23 = 0.3333333333333333 tmp24 = tmp22 * tmp23 tmp25 = 1.0 tmp26 = tmp24 - tmp25 tmp27 = x6 tmp29 = tmp27 < tmp6 tmp30 = tl.where(tmp29, tmp8, tmp9) tmp31 = tmp27 >= tmp6 tmp33 = -4 + x1 + 4 * x2 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp30, tmp35) tmp37 = tmp36.to(tl.float32) tmp39 = tmp37 + tmp38 tmp40 = tl.where(tmp2, tmp26, tmp39) tl.store(out_ptr0 + x7, tmp40, xmask) @triton.jit def triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1(in_out_ptr5, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex x4 = xindex // 16 tmp3 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp0 = tl.full([1], 0, tl.int32) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = 0.3333333333333333 tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp7 - tmp8 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp11 * tmp4 tmp13 = 1.5 tmp14 = tmp12 + tmp13 tmp15 = libdevice.floor(tmp14) tmp16 = tmp15 + tmp8 tmp17 = 4.0 tmp18 = tmp16 < tmp17 tmp19 = tmp1 == tmp1 tmp20 = tl.where(tmp19, tmp9, tmp3) tmp21 = tmp20 * tmp4 tmp22 = tmp21 + tmp13 tmp23 = libdevice.floor(tmp22) tmp24 = tmp23 + tmp8 tmp25 = 0.0 tmp26 = tmp24 >= tmp25 tmp27 = tmp24 < tmp17 tmp28 = tmp26 & tmp27 tmp29 = tmp18 & tmp28 tmp30 = tmp15 >= tmp25 tmp31 = tmp15 < tmp17 tmp32 = tmp31 & tmp28 tmp33 = tmp30 & tmp32 tmp34 = tmp16 >= tmp25 tmp35 = tmp23 >= tmp25 tmp36 = tmp23 < tmp17 tmp37 = tmp35 & tmp36 tmp38 = tmp18 & tmp37 tmp39 = tmp34 & tmp38 tmp40 = tmp31 & tmp37 tmp41 = tmp30 & tmp40 tmp42 = tmp16 - tmp14 tmp43 = tmp24 - tmp22 tmp44 = tmp42 * tmp43 tmp45 = tl.where(tmp41, tmp44, tmp25) tmp46 = tmp23.to(tl.int64) tmp47 = tl.full([1], 0, tl.int64) tmp48 = tl.where(tmp39, tmp46, tmp47) tmp49 = tl.full([XBLOCK], 4, tl.int32) tmp50 = tmp48 + tmp49 tmp51 = tmp48 < 0 tmp52 = tl.where(tmp51, tmp50, tmp48) tl.device_assert((0 
<= tmp52) & (tmp52 < 4) | ~xmask, 'index out of bounds: 0 <= tmp52 < 4') tmp54 = tmp16.to(tl.int64) tmp55 = tl.where(tmp39, tmp54, tmp47) tmp56 = tmp55 + tmp49 tmp57 = tmp55 < 0 tmp58 = tl.where(tmp57, tmp56, tmp55) tl.device_assert((0 <= tmp58) & (tmp58 < 4) | ~xmask, 'index out of bounds: 0 <= tmp58 < 4') tmp60 = tmp14 - tmp15 tmp61 = tmp60 * tmp43 tmp62 = tl.where(tmp39, tmp61, tmp25) tmp63 = tmp8 * tmp62 tmp64 = tmp24.to(tl.int64) tmp65 = tl.where(tmp33, tmp64, tmp47) tmp66 = tmp65 + tmp49 tmp67 = tmp65 < 0 tmp68 = tl.where(tmp67, tmp66, tmp65) tl.device_assert((0 <= tmp68) & (tmp68 < 4) | ~xmask, 'index out of bounds: 0 <= tmp68 < 4') tmp70 = tmp15.to(tl.int64) tmp71 = tl.where(tmp33, tmp70, tmp47) tmp72 = tmp71 + tmp49 tmp73 = tmp71 < 0 tmp74 = tl.where(tmp73, tmp72, tmp71) tl.device_assert((0 <= tmp74) & (tmp74 < 4) | ~xmask, 'index out of bounds: 0 <= tmp74 < 4') tmp76 = tmp22 - tmp23 tmp77 = tmp42 * tmp76 tmp78 = tl.where(tmp33, tmp77, tmp25) tmp79 = tmp8 * tmp78 tmp80 = tmp34 & tmp29 tmp81 = tmp60 * tmp76 tmp82 = tl.where(tmp80, tmp81, tmp25) tmp83 = tl.where(tmp41, tmp46, tmp47) tmp84 = tl.where(tmp41, tmp70, tmp47) tmp85 = tl.where(tmp80, tmp64, tmp47) tmp86 = tmp83 + tmp49 tmp87 = tmp83 < 0 tmp88 = tl.where(tmp87, tmp86, tmp83) tl.device_assert((0 <= tmp88) & (tmp88 < 4) | ~xmask, 'index out of bounds: 0 <= tmp88 < 4') tmp90 = tmp84 + tmp49 tmp91 = tmp84 < 0 tmp92 = tl.where(tmp91, tmp90, tmp84) tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask, 'index out of bounds: 0 <= tmp92 < 4') tmp94 = tmp8 * tmp45 tmp95 = tmp94 + tmp63 tmp96 = tmp95 + tmp79 tmp97 = tmp85 + tmp49 tmp98 = tmp85 < 0 tmp99 = tl.where(tmp98, tmp97, tmp85) tl.device_assert((0 <= tmp99) & (tmp99 < 4) | ~xmask, 'index out of bounds: 0 <= tmp99 < 4') tmp101 = tl.where(tmp80, tmp54, tmp47) tmp102 = tmp101 + tmp49 tmp103 = tmp101 < 0 tmp104 = tl.where(tmp103, tmp102, tmp101) tl.device_assert((0 <= tmp104) & (tmp104 < 4) | ~xmask, 'index out of bounds: 0 <= tmp104 < 4') tmp106 = tmp8 * tmp82 tmp107 = tmp96 + tmp106 tmp108 = tl.load(in_ptr1 + (tmp58 + 4 * tmp52 + 16 * x4), xmask, eviction_policy='evict_last') tmp109 = tmp108 * tmp62 tmp110 = tl.load(in_ptr1 + (tmp74 + 4 * tmp68 + 16 * x4), xmask, eviction_policy='evict_last') tmp111 = tmp110 * tmp78 tmp112 = tl.load(in_ptr1 + (tmp104 + 4 * tmp99 + 16 * x4), xmask, eviction_policy='evict_last') tmp113 = tmp112 * tmp82 tmp114 = tl.load(in_ptr1 + (tmp92 + 4 * tmp88 + 16 * x4), xmask, eviction_policy='evict_last') tmp115 = tmp114 * tmp45 tmp116 = tmp115 + tmp109 tmp117 = tmp116 + tmp111 tmp118 = tmp117 + tmp113 tmp119 = 0.9999 tmp120 = tmp107 > tmp119 tmp121 = tmp120.to(tl.float32) tmp122 = tmp118 * tmp121 tl.store(in_out_ptr5 + x3, tmp122, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_copy_div_mul_repeat_sub_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf12 del buf12 buf27 = buf13 del buf13 triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1[grid(256)]( buf27, buf0, arg1_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 del buf0 return buf27, def coords_grid(flow: 'Tensor') ->Tensor: """Generate shifted coordinate grid based based input flow. 
    Args:
        flow (Tensor): Estimated optical flow.

    Returns:
        Tensor: The coordinate grid shifted by the input flow and scaled to
            the range [-1, 1].
    """
    B, _, H, W = flow.shape
    xx = torch.arange(0, W, device=flow.device, requires_grad=False)
    yy = torch.arange(0, H, device=flow.device, requires_grad=False)
    coords = torch.meshgrid(yy, xx)
    coords = torch.stack(coords[::-1], dim=0).float()
    grid = coords[None].repeat(B, 1, 1, 1) + flow
    grid[:, 0, ...] = grid[:, 0, ...] * 2.0 / max(W - 1, 1) - 1.0
    grid[:, 1, ...] = grid[:, 1, ...] * 2.0 / max(H - 1, 1) - 1.0
    grid = grid.permute(0, 2, 3, 1)
    return grid


class WarpNew(nn.Module):
    """Warping layer to warp feature using optical flow.

    Args:
        mode (str): interpolation mode to calculate output values. Options
            are 'bilinear' and 'nearest'. Defaults to 'bilinear'.
        padding_mode (str): padding mode for outside grid values. Options
            are 'zeros', 'border' and 'reflection'. Defaults to 'zeros'.
        align_corners (bool): If set to True, the extrema (-1 and 1) are
            considered as referring to the center points of the input’s
            corner pixels. If set to False, they are instead considered as
            referring to the corner points of the input’s corner pixels,
            making the sampling more resolution agnostic. Defaults to False.
    """

    def __init__(self, mode: 'str'='bilinear', padding_mode: 'str'='zeros',
            align_corners: 'bool'=False, use_mask: 'bool'=True) ->None:
        super().__init__()
        self.mode = mode
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.use_mask = use_mask

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(mode={self.mode}, '
        s += f'padding_mode={self.padding_mode}, '
        s += f'align_corners={self.align_corners}, '
        s += f'use_mask={self.use_mask})'
        return s

    def forward(self, input_0, input_1):
        arg1_1 = input_0
        arg0_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
hologerry/mmflow
Warp
false
15,543
[ "Apache-2.0" ]
481
40caf064851bd95317424e31cc137c0007a2bece
https://github.com/hologerry/mmflow/tree/40caf064851bd95317424e31cc137c0007a2bece
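The two fused kernels in the Warp row above bake flow-based warping plus an out-of-bounds mask into one pass. Below is a hypothetical eager-mode reference (warp_reference is not part of the row) under the row's recorded defaults: bilinear sampling, 'zeros' padding, align_corners=False and use_mask=True; the 0.9999 threshold mirrors the constant in triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1.

import torch
import torch.nn.functional as F

def warp_reference(feat, flow):
    # Build the normalized sampling grid the same way coords_grid does.
    B, _, H, W = flow.shape
    yy, xx = torch.meshgrid(torch.arange(H), torch.arange(W))
    coords = torch.stack((xx, yy), dim=0).float()
    grid = coords[None].repeat(B, 1, 1, 1) + flow
    grid[:, 0] = grid[:, 0] * 2.0 / max(W - 1, 1) - 1.0
    grid[:, 1] = grid[:, 1] * 2.0 / max(H - 1, 1) - 1.0
    grid = grid.permute(0, 2, 3, 1)
    out = F.grid_sample(feat, grid, mode='bilinear', padding_mode='zeros',
                        align_corners=False)
    # Warp an all-ones tensor to detect samples that fell outside the image.
    ones = F.grid_sample(torch.ones_like(feat), grid, mode='bilinear',
                         padding_mode='zeros', align_corners=False)
    return out * (ones > 0.9999).float()

print(warp_reference(torch.rand(4, 4, 4, 4), torch.rand(4, 2, 4, 4)).shape)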
PCEN
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.quantization
import torch.utils.data.distributed


class PCEN(nn.Module):

    def __init__(self):
        super(PCEN, self).__init__()
        """
        Initialising the layer parameters with the best parametrised values
        found on the web (scipy uses these values):
        alpha = 0.98
        delta = 2
        r = 0.5
        """
        self.log_alpha = Parameter(torch.FloatTensor([0.98]))
        self.log_delta = Parameter(torch.FloatTensor([2]))
        self.log_r = Parameter(torch.FloatTensor([0.5]))
        self.eps = 1e-06

    def forward(self, x, smoother):
        smooth = (self.eps + smoother) ** -self.log_alpha
        pcen = (x * smooth + self.log_delta
            ) ** self.log_r - self.log_delta ** self.log_r
        return pcen


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.parameter import Parameter import torch.quantization import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp9 = tl.load(in_ptr3 + 0) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp12 = tl.load(in_ptr4 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp2 = 1e-06 tmp3 = tmp1 + tmp2 tmp6 = -tmp5 tmp7 = libdevice.pow(tmp3, tmp6) tmp8 = tmp0 * tmp7 tmp11 = tmp8 + tmp10 tmp14 = libdevice.pow(tmp11, tmp13) tmp15 = libdevice.pow(tmp10, tmp13) tmp16 = tmp14 - tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_neg_pow_sub_0[grid(256)](primals_3, primals_1, primals_2, primals_4, primals_5, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2, primals_3, primals_4, primals_5 class PCENNew(nn.Module): def __init__(self): super(PCENNew, self).__init__() """ initialising the layer param with the best parametrised values i searched on web (scipy using theese values) alpha = 0.98 delta=2 r=0.5 """ self.log_alpha = Parameter(torch.FloatTensor([0.98])) self.log_delta = Parameter(torch.FloatTensor([2])) self.log_r = Parameter(torch.FloatTensor([0.5])) self.eps = 1e-06 def forward(self, input_0, input_1): primals_2 = self.log_alpha primals_4 = self.log_delta primals_5 = self.log_r primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
hovercraft-github/wav2letter.pytorch
PCEN
false
15,544
[ "MIT" ]
121
e2b82b418a7854522540e0925bcf894c0ca80e6a
https://github.com/hovercraft-github/wav2letter.pytorch/tree/e2b82b418a7854522540e0925bcf894c0ca80e6a
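The single fused kernel in the PCEN row is a direct transcription of the per-channel energy normalisation formula pcen = (x * (eps + M)^(-alpha) + delta)^r - delta^r. A worked eager check under the row's default parameters (alpha=0.98, delta=2, r=0.5, eps=1e-6):

import torch

x, smoother = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
alpha, delta, r, eps = 0.98, 2.0, 0.5, 1e-06
smooth = (eps + smoother) ** -alpha   # matches tmp7 = pow(tmp3, -alpha) in the kernel
pcen = (x * smooth + delta) ** r - delta ** r
print(pcen.shape)  # torch.Size([4, 4, 4, 4])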
MultiHeadAttention
import math
import torch
import torch.utils.data
from torch import nn
from torch.nn import LayerNorm as FusedLayerNorm


class MultiHeadAttention(nn.Module):
    """
    Multi-head scaled dot-product attention layer.

    Args:
        hidden_size: size of the embeddings in the model, also known as d_model
        num_attention_heads: number of heads in multi-head attention
        attn_score_dropout: probability of dropout applied to attention scores
        attn_layer_dropout: probability of dropout applied to the output of
            the whole layer, but before layer normalization
    """

    def __init__(self, hidden_size, num_attention_heads,
            attn_score_dropout=0.0, attn_layer_dropout=0.0):
        super().__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (hidden_size, num_attention_heads))
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.attn_head_size = int(hidden_size / num_attention_heads)
        self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
        self.query_net = nn.Linear(hidden_size, hidden_size)
        self.key_net = nn.Linear(hidden_size, hidden_size)
        self.value_net = nn.Linear(hidden_size, hidden_size)
        self.out_projection = nn.Linear(hidden_size, hidden_size)
        self.attn_dropout = nn.Dropout(attn_score_dropout)
        self.layer_dropout = nn.Dropout(attn_layer_dropout)
        self.layer_norm = FusedLayerNorm(hidden_size, eps=1e-05)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attn_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, queries, keys, values, attention_mask=None):
        query = self.query_net(queries)
        key = self.key_net(keys)
        value = self.value_net(values)
        query = self.transpose_for_scores(query) / self.attn_scale
        key = self.transpose_for_scores(key) / self.attn_scale
        value = self.transpose_for_scores(value)
        attention_scores = torch.matmul(query, key.transpose(-1, -2)).float()
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask.float()
        attention_probs = torch.softmax(attention_scores, dim=-1)
        attention_probs = self.attn_dropout(attention_probs)
        context = torch.matmul(attention_probs, value)
        context = context.permute(0, 2, 1, 3).contiguous()
        new_context_shape = context.size()[:-2] + (self.hidden_size,)
        context = context.view(*new_context_shape)
        output_states = self.out_projection(context)
        output_states = self.layer_dropout(output_states)
        output_states = self.layer_norm(queries + output_states)
        return output_states


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4, 'num_attention_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data from torch import nn from torch.nn import LayerNorm as FusedLayerNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_div_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, 
out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_div_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_div_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf11, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf11, buf12, buf13, primals_12, primals_13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_13 return buf14, primals_3, primals_12, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), buf11, primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadAttentionNew(nn.Module): """ Multi-head scaled dot-product attention layer. 
Args: hidden_size: size of the embeddings in the model, also known as d_model num_attention_heads: number of heads in multi-head attention attn_score_dropout: probability of dropout applied to attention scores attn_layer_dropout: probability of dropout applied to the output of the whole layer, but before layer normalization """ def __init__(self, hidden_size, num_attention_heads, attn_score_dropout =0.0, attn_layer_dropout=0.0): super().__init__() if hidden_size % num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (hidden_size, num_attention_heads)) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.attn_head_size = int(hidden_size / num_attention_heads) self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size)) self.query_net = nn.Linear(hidden_size, hidden_size) self.key_net = nn.Linear(hidden_size, hidden_size) self.value_net = nn.Linear(hidden_size, hidden_size) self.out_projection = nn.Linear(hidden_size, hidden_size) self.attn_dropout = nn.Dropout(attn_score_dropout) self.layer_dropout = nn.Dropout(attn_layer_dropout) self.layer_norm = FusedLayerNorm(hidden_size, eps=1e-05) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attn_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2): primals_1 = self.query_net.weight primals_2 = self.query_net.bias primals_4 = self.key_net.weight primals_5 = self.key_net.bias primals_7 = self.value_net.weight primals_8 = self.value_net.bias primals_10 = self.out_projection.weight primals_11 = self.out_projection.bias primals_12 = self.layer_norm.weight primals_13 = self.layer_norm.bias primals_3 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
hieuvecto/CASIA-SURF_CeFA
MultiHeadAttention
false
15,545
[ "MIT" ]
133
71dfd846ce968b3ed26974392a6e0c9b40aa12ae
https://github.com/hieuvecto/CASIA-SURF_CeFA/tree/71dfd846ce968b3ed26974392a6e0c9b40aa12ae
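A detail worth noting in this row: query and key are each divided by attn_scale = sqrt(sqrt(d)), so their product carries the usual 1/sqrt(d) softmax scaling while keeping intermediate magnitudes small. For the row's configuration (hidden 4, 4 heads, head size 1) the scale degenerates to 1.0, which is why triton_poi_fused_clone_div_0 multiplies by the constant 1.0. A quick numeric check of the split-scaling identity with an illustrative d=4:

import math
import torch

d = 4
q, k = torch.rand(2, 3, d), torch.rand(2, 3, d)
s = math.sqrt(math.sqrt(d))
scores_split = (q / s) @ (k / s).transpose(-1, -2)        # scale q and k separately
scores_once = (q @ k.transpose(-1, -2)) / math.sqrt(d)    # usual 1/sqrt(d) once
torch.testing.assert_close(scores_split, scores_once)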
GlobalAveragePooling
import torch
from torch import nn
import torch.nn.functional as F


class GlobalAveragePooling(nn.Module):

    def __init__(self):
        super(GlobalAveragePooling, self).__init__()

    def forward(self, feat):
        num_channels = feat.size(1)
        return F.avg_pool2d(feat, (feat.size(2), feat.size(3))).view(-1,
            num_channels)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalAveragePoolingNew(nn.Module): def __init__(self): super(GlobalAveragePoolingNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hugovk/EnAET
GlobalAveragePooling
false
15,546
[ "MIT" ]
87
596a1be95f4ebfc5fc4f372f251e66fb03e23b5a
https://github.com/hugovk/EnAET/tree/596a1be95f4ebfc5fc4f372f251e66fb03e23b5a
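The fused kernel in the GlobalAveragePooling row unrolls the 4x4 spatial window into 16 loads, sums them and multiplies by 0.0625 (= 1/16). For a full-image window this is just a spatial mean, which a short eager check confirms:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
out = F.avg_pool2d(x, (x.size(2), x.size(3))).view(-1, x.size(1))
torch.testing.assert_close(out, x.mean(dim=(2, 3)))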
BPR_max
import torch
import torch.nn as nn
import torch.nn.functional as F


class BPR_max(nn.Module):

    def __init__(self):
        super(BPR_max, self).__init__()

    def forward(self, logit):
        logit_softmax = F.softmax(logit, dim=1)
        diff = logit.diag().view(-1, 1).expand_as(logit) - logit
        loss = -torch.log(torch.mean(logit_softmax * torch.sigmoid(diff)))
        return loss


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_per_fused__softmax_log_mean_mul_neg_sigmoid_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + 5 * r1, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + r2, None) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp11 = tmp9 - tmp10 tmp12 = tl.sigmoid(tmp11) tmp13 = tmp8 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = tl_math.log(tmp18) tmp20 = -tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__softmax_log_mean_mul_neg_sigmoid_sub_1[grid(1)](buf2, buf0, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class BPR_maxNew(nn.Module): def __init__(self): super(BPR_maxNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hungthanhpham94/GRU4REC-pytorch
BPR_max
false
15,547
[ "Apache-2.0" ]
184
666b84264c4afae757fe55c6997dcf0a4da1d44e
https://github.com/hungthanhpham94/GRU4REC-pytorch/tree/666b84264c4afae757fe55c6997dcf0a4da1d44e
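A restatement of the fused BPR-max loss for readers tracing the second kernel: the stride-5 load (in_ptr1 + 5 * r1) there is exactly logit.diag() for a row-major 4x4 matrix, since the diagonal sits at offsets 0, 5, 10, 15.

import torch
import torch.nn.functional as F

logit = torch.rand(4, 4)
p = F.softmax(logit, dim=1)
diff = logit.diag().view(-1, 1).expand_as(logit) - logit   # score_i - score_j
loss = -torch.log(torch.mean(p * torch.sigmoid(diff)))
print(loss)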
mbr_convex_hull
import torch
import torch.nn as nn


class mbr_convex_hull(nn.Module):

    def __init__(self, hull_points_2d=None):
        # hull_points_2d is optional so that get_init_inputs() below can
        # still construct the module without arguments.
        super(mbr_convex_hull, self).__init__()
        self.hull_points_2d = hull_points_2d

    def forward(self, hull_points_2d):
        N = hull_points_2d.shape[0]
        edges = hull_points_2d[1:N, :].add(-hull_points_2d[0:N - 1, :])
        edge_angles = torch.atan2(edges[:, 1], edges[:, 0])
        edge_angles = torch.fmod(edge_angles, 3.1415926 / 2.0)
        edge_angles = torch.abs(edge_angles)
        a = torch.stack((torch.cos(edge_angles), torch.cos(edge_angles -
            3.1415926 / 2.0)), 1)
        a = torch.unsqueeze(a, 1)
        b = torch.stack((torch.cos(edge_angles + 3.1415926 / 2.0), torch.
            cos(edge_angles)), 1)
        b = torch.unsqueeze(b, 1)
        R_tensor = torch.cat((a, b), 1)
        hull_points_2d_ = torch.unsqueeze(torch.transpose(hull_points_2d,
            0, 1), 0)
        rot_points = R_tensor.matmul(hull_points_2d_)
        min_x = torch.min(rot_points, 2)[0]
        max_x = torch.max(rot_points, 2)[0]
        areas = (max_x[:, 0] - min_x[:, 0]).mul(max_x[:, 1] - min_x[:, 1])
        return torch.min(areas)


def get_inputs():
    return [torch.rand([4, 2, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (12 + 8 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (4 + 8 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = -tmp6 tmp8 = tmp5 + tmp7 tmp9 = tl.load(in_ptr0 + (8 + 8 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr0 + (8 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = -tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.atan2(tmp8, tmp12) tmp14 = 1.5707963 tmp15 = libdevice.fmod(tmp13, tmp14) tmp16 = tl_math.abs(tmp15) tmp17 = tl_math.cos(tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp4, tmp17, tmp18) tmp20 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp23 = tl.load(in_ptr0 + (12 + 8 * x1 + (-4 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr0 + (4 + 8 * x1 + (-4 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = -tmp24 tmp26 = tmp23 + tmp25 tmp27 = tl.load(in_ptr0 + (8 + 8 * x1 + (-4 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr0 + (8 * x1 + (-4 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = -tmp28 tmp30 = tmp27 + tmp29 tmp31 = libdevice.atan2(tmp26, tmp30) tmp32 = libdevice.fmod(tmp31, tmp14) tmp33 = tl_math.abs(tmp32) tmp34 = tmp33 - tmp14 tmp35 = tl_math.cos(tmp34) tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp20, tmp35, tmp36) tmp38 = tl.where(tmp4, tmp19, tmp37) tmp39 = tmp16 + tmp14 tmp40 = tl_math.cos(tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp4, tmp40, tmp41) tmp43 = tl_math.cos(tmp33) tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp20, tmp43, tmp44) tmp46 = tl.where(tmp4, tmp42, tmp45) tl.store(out_ptr0 + x2, tmp38, xmask) tl.store(out_ptr1 + x2, tmp46, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 2 x0 = xindex % 8 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 8 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 8 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 2 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 8 * x1), xmask, eviction_policy ='evict_last') tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_per_fused_min_mul_sub_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 12 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 4 r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp5 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), rmask, other=0.0) tmp6 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), rmask, other=0.0) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = triton_helpers.minimum(tmp0, tmp1) tmp4 = tmp2 - tmp3 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = triton_helpers.minimum(tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tmp4 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask, tmp11, float('inf')) tmp14 = triton_helpers.min2(tmp13, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((3, 8), (8, 1), torch.float32) buf1 = empty_strided_cuda((3, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(24)](arg0_1, buf0, buf1, 24, XBLOCK= 32, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((3, 2, 2, 4), (16, 8, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(48)](buf0, buf1, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 buf3 = empty_strided_cuda((3, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(96)](arg0_1, buf3, 96, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf4 = empty_strided_cuda((6, 2, 4), (8, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (6, 2, 4), (8, 4, 1), 0 ), reinterpret_tensor(buf3, (6, 4, 4), (16, 4, 1), 0), out=buf4) del buf2 del buf3 buf5 = empty_strided_cuda((), (), torch.float32) triton_per_fused_min_mul_sub_3[grid(1)](buf4, buf5, 1, 12, XBLOCK=1, num_warps=2, num_stages=1) del buf4 return buf5, class mbr_convex_hullNew(nn.Module): def _init_(self, hull_points_2d): super(mbr_convex_hullNew, self)._init_() self.hull_points_2d = hull_points_2d return def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hlesmqh/WS3D
mbr_convex_hull
false
15,548
[ "MIT" ]
100
6816eeb135923a59de34ee5d94be2d0fd3ec83f9
https://github.com/hlesmqh/WS3D/tree/6816eeb135923a59de34ee5d94be2d0fd3ec83f9
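The row above implements the classic minimum-bounding-rectangle trick: the optimal rectangle is aligned with some hull edge, so it rotates the points by each edge angle, takes the axis-aligned bounding-box area, and returns the minimum. A tiny worked example on a unit square (note cos(theta - pi/2) = sin(theta) and cos(theta + pi/2) = -sin(theta), so the row's R_tensor is an ordinary rotation matrix):

import torch

hull = torch.tensor([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
edges = hull[1:] - hull[:-1]
ang = torch.abs(torch.fmod(torch.atan2(edges[:, 1], edges[:, 0]), torch.pi / 2))
R = torch.stack([torch.cos(ang), torch.sin(ang),
                 -torch.sin(ang), torch.cos(ang)], dim=1).view(-1, 2, 2)
rot = R @ hull.t().unsqueeze(0)                  # (4, 2, 5) rotated point sets
areas = (rot.max(2).values - rot.min(2).values).prod(dim=1)
print(areas.min())  # ~1.0: the square's own bounding box is already optimal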
GlobalWeightedAvgPool2d
import torch
from torch import nn


class GlobalWeightedAvgPool2d(nn.Module):
    """
    Global Weighted Average Pooling from paper "Global Weighted Average
    Pooling Bridges Pixel-level Localization and Image-level Classification"
    """

    def __init__(self, features: 'int', flatten=False):
        super().__init__()
        self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True)
        self.flatten = flatten

    def fscore(self, x):
        m = self.conv(x)
        m = m.sigmoid().exp()
        return m

    def norm(self, x: 'torch.Tensor'):
        return x / x.sum(dim=[2, 3], keepdim=True)

    def forward(self, x):
        input_x = x
        x = self.fscore(x)
        x = self.norm(x)
        x = x * input_x
        x = x.sum(dim=[2, 3], keepdim=not self.flatten)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_convolution_exp_sigmoid_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp3, xmask) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_div_exp_mul_sigmoid_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 / tmp3 tmp6 = tmp4 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_per_fused_convolution_exp_sigmoid_sum_0[grid(4)](buf1, primals_3, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_per_fused_div_exp_mul_sigmoid_sum_1[grid(16)](buf1, buf2, primals_1, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) return buf3, primals_1, primals_2, buf1, buf2 class GlobalWeightedAvgPool2dNew(nn.Module): """ Global Weighted Average Pooling from paper "Global Weighted Average Pooling Bridges Pixel-level Localization and Image-level Classification" """ def __init__(self, features: 'int', flatten=False): super().__init__() self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True) self.flatten = flatten def fscore(self, x): m = self.conv(x) m = m.sigmoid().exp() return m def norm(self, x: 'torch.Tensor'): 
        return x / x.sum(dim=[2, 3], keepdim=True)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
huangjiadidi/dfdc_deepfake_challenge
GlobalWeightedAvgPool2d
false
15,549
[ "MIT" ]
499
1f78fe93a5a445ced386e43b3b0378ee567eaa77
https://github.com/huangjiadidi/dfdc_deepfake_challenge/tree/1f78fe93a5a445ced386e43b3b0378ee567eaa77
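Shape-level sketch of the weighted pooling fused above: the attention map is exp(sigmoid(conv(x))), normalised to sum to 1 over H and W per image, then used to weight the input before spatial summation. Here m is a random stand-in for the 1x1 conv output, not the row's trained weights:

import torch

x = torch.rand(4, 4, 4, 4)
m = torch.rand(4, 1, 4, 4)                 # stand-in for self.conv(x)
w = m.sigmoid().exp()
w = w / w.sum(dim=[2, 3], keepdim=True)    # weights sum to 1 per image
out = (w * x).sum(dim=[2, 3], keepdim=True)
print(out.shape)  # torch.Size([4, 4, 1, 1])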
ScalableTanh
import torch
from torch import nn


class ScalableTanh(nn.Module):

    def __init__(self, input_size):
        super(ScalableTanh, self).__init__()
        self.scale = nn.Parameter(torch.zeros(input_size), requires_grad=True)

    def forward(self, x):
        return self.scale * torch.tanh(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_tanh_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class ScalableTanhNew(nn.Module): def __init__(self, input_size): super(ScalableTanhNew, self).__init__() self.scale = nn.Parameter(torch.zeros(input_size), requires_grad=True) def forward(self, input_0): primals_1 = self.scale primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
hongyehu/NeuralRG
ScalableTanh
false
15,550
[ "Apache-2.0" ]
65
ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8
https://github.com/hongyehu/NeuralRG/tree/ff4eb18f7f9e083dac6f3da3995f3f69ecf381e8
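One property of this row worth calling out: scale is zero-initialised, so the layer starts as the zero map and learns how much tanh nonlinearity to mix in, while gradients still reach the parameter from the first step.

import torch

scale = torch.zeros(4, requires_grad=True)
x = torch.rand(4, 4, 4, 4)
y = scale * torch.tanh(x)
print(y.abs().max())       # tensor(0., ...): output is identically zero at init
y.sum().backward()
print(scale.grad is None)  # False: tanh(x) still propagates into scale.grad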
Net
import torch
import torch.nn as nn


class Net(nn.Module):

    def __init__(self, n_inputs, n_units=[50, 50, 50]):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(n_inputs, n_units[0])
        self.fc2 = nn.Linear(n_units[0], n_units[1])
        self.fc3 = nn.Linear(n_units[1], n_units[2])
        self.out = nn.Linear(n_units[2], 1)

    def forward(self, x):
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        x = torch.tanh(self.fc3(x))
        return self.out(x)

    def basis_funcs(self, x, bias=False, linear=False):
        raw_x = x
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        x = torch.tanh(self.fc3(x))
        if linear:
            x = torch.cat((x, raw_x), dim=-1)
        if bias:
            x = torch.cat((x, torch.ones(size=(raw_x.shape[0], 1))), dim=-1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_inputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (50, 50), (50, 1)) assert_size_stride(primals_5, (50,), (1,)) assert_size_stride(primals_6, (50, 50), (50, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (1, 50), (50, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(3200)](buf1, primals_2, 3200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 50), (1, 50), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(3200)](buf3, primals_5, 3200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 50), (50, 1), 0), reinterpret_tensor(primals_6, (50, 50), (1, 50), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf4 triton_poi_fused_tanh_0[grid(3200)](buf5, primals_7, 3200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 50), (50, 1), 0), reinterpret_tensor(primals_8, (50, 1), (1, 50), 0), alpha=1, beta=1, out=buf7) del primals_9 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_8, primals_6, primals_4 class NetNew(nn.Module): def __init__(self, n_inputs, n_units=[50, 50, 50]): super(NetNew, self).__init__() self.fc1 = nn.Linear(n_inputs, n_units[0]) self.fc2 = nn.Linear(n_units[0], n_units[1]) self.fc3 = nn.Linear(n_units[1], n_units[2]) self.out = nn.Linear(n_units[2], 1) def basis_funcs(self, x, bias=False, linear=False): raw_x = x x = torch.tanh(self.fc1(x)) x = torch.tanh(self.fc2(x)) x = 
torch.tanh(self.fc3(x))
        if linear:
            x = torch.cat((x, raw_x), dim=-1)
        if bias:
            x = torch.cat((x, torch.ones(size=(raw_x.shape[0], 1))), dim=-1)
        return x

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.out.weight
        primals_9 = self.out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
hssandriss/pybnn
Net
false
15,551
[ "BSD-3-Clause" ]
110
e878553a24ce9ebdde9088f285c7f292e4ee8885
https://github.com/hssandriss/pybnn/tree/e878553a24ce9ebdde9088f285c7f292e4ee8885
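basis_funcs in this row exposes the last tanh layer as regression basis functions, optionally appending the raw inputs and a constant column. A standalone shape sketch of the linear=True, bias=True path, using a hypothetical nn.Sequential stand-in for fc1-fc3 rather than the row's class; note the ones column is built as (raw_x.shape[0], 1), so this path assumes a 2-D input batch:

import torch
import torch.nn as nn

fc = nn.Sequential(nn.Linear(4, 50), nn.Tanh(), nn.Linear(50, 50), nn.Tanh(),
                   nn.Linear(50, 50), nn.Tanh())
x = torch.rand(8, 4)
feats = fc(x)                                                  # 50 tanh features
feats = torch.cat((feats, x), dim=-1)                          # linear=True
feats = torch.cat((feats, torch.ones(x.shape[0], 1)), dim=-1)  # bias=True
print(feats.shape)  # torch.Size([8, 55]) -> 50 + 4 + 1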
ScaleDotProductAttention
import math
import torch
from torch import nn


class ScaleDotProductAttention(nn.Module):
    """
    Compute scaled dot-product attention.

    Query : given sentence that we focused on (decoder)
    Key : every sentence to check relationship with Query (encoder)
    Value : every sentence, same as Key (encoder)
    """

    def __init__(self):
        super(ScaleDotProductAttention, self).__init__()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, q, k, v, mask=None, e=1e-12):
        _batch_size, _head, _length, d_tensor = k.size()
        k_t = k.transpose(2, 3)
        score = q @ k_t / math.sqrt(d_tensor)
        if mask is not None:
            score = score.masked_fill(mask == 0, -e)
        score = self.softmax(score)
        v = score @ v
        return v, score


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class ScaleDotProductAttentionNew(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) 
    Value : every sentence, same as Key (encoder)
    """

    def __init__(self):
        super(ScaleDotProductAttentionNew, self).__init__()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1]
hyunwoongko/transformer
ScaleDotProductAttention
false
15,552
[ "Apache-2.0" ]
233
8f7aaa19d37b088c156db0512868127ba9bf1a0f
https://github.com/hyunwoongko/transformer/tree/8f7aaa19d37b088c156db0512868127ba9bf1a0f
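Minimal trace of this row's attention math on its 4x4x4x4 sample shapes. One thing to watch when reusing the row: the mask branch fills masked positions with -e = -1e-12, a value near zero rather than -inf, so masked scores are only barely suppressed after softmax.

import math
import torch

q = k = v = torch.rand(4, 4, 4, 4)
d = k.size(-1)
score = torch.softmax(q @ k.transpose(2, 3) / math.sqrt(d), dim=-1)
out = score @ v
print(out.shape, float(score.sum(-1)[0, 0, 0]))  # attention rows sum to 1.0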
TOP1_max
import torch
import torch.nn as nn
import torch.nn.functional as F


class TOP1_max(nn.Module):

    def __init__(self):
        super(TOP1_max, self).__init__()

    def forward(self, logit):
        logit_softmax = F.softmax(logit, dim=1)
        diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit)
        loss = torch.mean(logit_softmax * (torch.sigmoid(diff) + torch.
            sigmoid(logit ** 2)))
        return loss


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_per_fused__softmax_add_mean_mul_neg_pow_sigmoid_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + 5 * r1, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + r2, None) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp11 = tmp9 - tmp10 tmp12 = -tmp11 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp10 * tmp10 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp8 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 16.0 tmp22 = tmp20 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__softmax_add_mean_mul_neg_pow_sigmoid_sub_1[grid(1)]( buf2, buf0, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class TOP1_maxNew(nn.Module): def __init__(self): super(TOP1_maxNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hungthanhpham94/GRU4REC-pytorch
TOP1_max
false
15,553
[ "Apache-2.0" ]
184
666b84264c4afae757fe55c6997dcf0a4da1d44e
https://github.com/hungthanhpham94/GRU4REC-pytorch/tree/666b84264c4afae757fe55c6997dcf0a4da1d44e
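Eager restatement of the TOP1-max loss fused above, for comparison with the BPR-max row from the same repo: a softmax-weighted mean of sigmoid(logit_j - logit_i) plus the sigmoid(logit^2) regulariser; the fused kernel again reads the diagonal through the stride-5 load.

import torch
import torch.nn.functional as F

logit = torch.rand(4, 4)
p = F.softmax(logit, dim=1)
diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit)
loss = torch.mean(p * (torch.sigmoid(diff) + torch.sigmoid(logit ** 2)))
print(loss)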
ConvolutionModule
import torch
from torch import Tensor
from torch import nn


class Swish(torch.nn.Module):
    """Construct a Swish object."""

    def forward(self, x: 'Tensor') ->Tensor:
        """Return the Swish activation function."""
        return x * torch.sigmoid(x)


class ConvolutionModule(nn.Module):
    """ConvolutionModule in Conformer model.
    Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py

    Args:
        channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
        bias (bool): Whether to use bias in conv layers (default=True).
    """

    def __init__(self, channels: 'int', kernel_size: 'int', bias: 'bool'=True
            ) ->None:
        """Construct a ConvolutionModule object."""
        super(ConvolutionModule, self).__init__()
        assert (kernel_size - 1) % 2 == 0
        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
            kernel_size=1, stride=1, padding=0, bias=bias)
        self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
            stride=1, padding=(kernel_size - 1) // 2, groups=channels,
            bias=bias)
        self.norm = nn.LayerNorm(channels)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
            stride=1, padding=0, bias=bias)
        self.activation = Swish()

    def forward(self, x: 'Tensor') ->Tensor:
        """Compute convolution module.

        Args:
            x: Input tensor (#time, batch, channels).

        Returns:
            Tensor: Output tensor (#time, batch, channels).
        """
        x = x.permute(1, 2, 0)
        x = self.pointwise_conv1(x)
        x = nn.functional.glu(x, dim=1)
        x = self.depthwise_conv(x)
        x = x.permute(0, 2, 1)
        x = self.norm(x)
        x = x.permute(0, 2, 1)
        x = self.activation(x)
        x = self.pointwise_conv2(x)
        return x.permute(2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'kernel_size': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * 
YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y1 = yindex // 4 y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.sigmoid(tmp8) tmp10 = tmp8 * tmp9 tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp10, xmask & ymask) tl.store(out_ptr2 + (x2 + 4 * y3), tmp10, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4), (32, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(128)](buf2, primals_3, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = buf0 del buf0 triton_poi_fused_glu_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=4, bias=None) assert_size_stride(buf4, (4, 4, 4), (16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_3[grid(64)](buf5, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_4[grid(16)](buf5, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5[grid( 16, 4)](buf5, buf6, buf7, primals_6, primals_7, buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf6 del buf7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4), (16, 4, 1)) del buf10 buf12 = buf11 del buf11 triton_poi_fused_convolution_3[grid(64)](buf12, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return (reinterpret_tensor(buf12, (4, 4, 4), (1, 16, 4), 0), primals_2, primals_4, primals_6, primals_7, primals_8, reinterpret_tensor( primals_1, (4, 4, 4), 
(4, 1, 16), 0), buf2, buf3, buf5, buf9) class Swish(torch.nn.Module): """Construct an Swish object.""" def forward(self, x: 'Tensor') ->Tensor: """Return Swich activation function.""" return x * torch.sigmoid(x) class ConvolutionModuleNew(nn.Module): """ConvolutionModule in Conformer model. Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py Args: channels (int): The number of channels of conv layers. kernel_size (int): Kernerl size of conv layers. bias (bool): Whether to use bias in conv layers (default=True). """ def __init__(self, channels: 'int', kernel_size: 'int', bias: 'bool'=True ) ->None: """Construct an ConvolutionModule object.""" super(ConvolutionModuleNew, self).__init__() assert (kernel_size - 1) % 2 == 0 self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias) self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias =bias) self.norm = nn.LayerNorm(channels) self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=bias) self.activation = Swish() def forward(self, input_0): primals_2 = self.pointwise_conv1.weight primals_3 = self.pointwise_conv1.bias primals_4 = self.depthwise_conv.weight primals_5 = self.depthwise_conv.bias primals_6 = self.norm.weight primals_7 = self.norm.bias primals_8 = self.pointwise_conv2.weight primals_9 = self.pointwise_conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
huangruizhe/icefall
ConvolutionModule
false
15,554
[ "Apache-2.0" ]
173
ea8af0ee9af5169d93f8f389ffebbc27a1d9e82a
https://github.com/huangruizhe/icefall/tree/ea8af0ee9af5169d93f8f389ffebbc27a1d9e82a
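The triton_poi_fused_glu_2 kernel in the ConvolutionModule record above implements the gated linear unit applied after pointwise_conv1: the 2 * channels output is split in half along the channel axis and one half gates the other through a sigmoid. A small self-contained check of that equivalence (the tensor sizes below are illustrative):

import torch
import torch.nn.functional as F

x = torch.rand(4, 8, 4)                      # (batch, 2 * channels, time) after pointwise_conv1
first_half, second_half = x.chunk(2, dim=1)  # split along the channel axis
manual_glu = first_half * torch.sigmoid(second_half)
assert torch.allclose(manual_glu, F.glu(x, dim=1))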
UnetGeneratorWBC
import torch from torch.nn import functional as F import torch.nn as nn def tf_2xupsample_bilinear(x): b, c, h, w = x.shape out = torch.zeros(b, c, h * 2, w * 2) out[:, :, ::2, ::2] = x padded = F.pad(x, (0, 1, 0, 1), mode='replicate') out[:, :, 1::2, ::2] = (padded[:, :, :-1, :-1] + padded[:, :, 1:, :-1]) / 2 out[:, :, ::2, 1::2] = (padded[:, :, :-1, :-1] + padded[:, :, :-1, 1:]) / 2 out[:, :, 1::2, 1::2] = (padded[:, :, :-1, :-1] + padded[:, :, 1:, 1:]) / 2 return out def tf_same_padding(x, k_size=3): j = k_size // 2 return F.pad(x, (j - 1, j, j - 1, j)) class Upsample(nn.Module): """Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data. The input data is assumed to be of the form `minibatch x channels x [optional depth] x [optional height] x width`. Args: size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional): output spatial sizes scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional): multiplier for spatial size. Has to match input size if it is a tuple. mode (str, optional): the upsampling algorithm: one of ``'nearest'``, ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``. Default: ``'nearest'`` align_corners (bool, optional): if ``True``, the corner pixels of the input and output tensors are aligned, and thus preserving the values at those pixels. This only has effect when :attr:`mode` is ``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False`` """ def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None): super(Upsample, self).__init__() if isinstance(scale_factor, tuple): self.scale_factor = tuple(float(factor) for factor in scale_factor) else: self.scale_factor = float(scale_factor) if scale_factor else None self.mode = mode self.size = size self.align_corners = align_corners def forward(self, x): return nn.functional.interpolate(x, size=self.size, scale_factor= self.scale_factor, mode=self.mode, align_corners=self.align_corners ) def extra_repr(self): if self.scale_factor is not None: info = 'scale_factor=' + str(self.scale_factor) else: info = 'size=' + str(self.size) info += ', mode=' + self.mode return info class ResBlock(nn.Module): def __init__(self, in_nf, out_nf=32, slope=0.2): super().__init__() self.conv1 = nn.Conv2d(in_nf, out_nf, 3, 1, padding=1) self.conv2 = nn.Conv2d(out_nf, out_nf, 3, 1, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=False) def forward(self, inputs): x = self.conv2(self.leaky_relu(self.conv1(inputs))) return x + inputs class Upsample_2xBil_TF(nn.Module): def __init__(self): super(Upsample_2xBil_TF, self).__init__() def forward(self, x): return tf_2xupsample_bilinear(x) class UnetGeneratorWBC(nn.Module): """ UNet Generator as used in Learning to Cartoonize Using White-box Cartoon Representations for image to image translation https://systemerrorwang.github.io/White-box-Cartoonization/paper/06791.pdf https://systemerrorwang.github.io/White-box-Cartoonization/paper/06791-supp.pdf """ def __init__(self, nf=32, mode='pt', slope=0.2): super(UnetGeneratorWBC, self).__init__() self.mode = mode self.conv = nn.Conv2d(3, nf, 7, 1, padding=3) if mode == 'tf': self.conv_1 = nn.Conv2d(nf, nf, 3, stride=2, padding=0) else: self.conv_1 = nn.Conv2d(nf, nf, 3, stride=2, padding=1) self.conv_2 = nn.Conv2d(nf, nf * 2, 3, 1, padding=1) if mode == 'tf': self.conv_3 = nn.Conv2d(nf * 2, nf * 2, 3, stride=2, padding=0) else: self.conv_3 = nn.Conv2d(nf * 2, nf * 2, 3, stride=2, padding=1) self.conv_4 = 
nn.Conv2d(nf * 2, nf * 4, 3, 1, padding=1) self.block_0 = ResBlock(nf * 4, nf * 4, slope=slope) self.block_1 = ResBlock(nf * 4, nf * 4, slope=slope) self.block_2 = ResBlock(nf * 4, nf * 4, slope=slope) self.block_3 = ResBlock(nf * 4, nf * 4, slope=slope) self.conv_5 = nn.Conv2d(nf * 4, nf * 2, 3, 1, padding=1) self.conv_6 = nn.Conv2d(nf * 2, nf * 2, 3, 1, padding=1) self.conv_7 = nn.Conv2d(nf * 2, nf, 3, 1, padding=1) self.conv_8 = nn.Conv2d(nf, nf, 3, 1, padding=1) self.conv_9 = nn.Conv2d(nf, 3, 7, 1, padding=3) self.leaky_relu = nn.LeakyReLU(negative_slope=slope, inplace=False) if mode == 'tf': self.upsample = Upsample_2xBil_TF() else: self.upsample = Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, x): x0 = self.conv(x) x0 = self.leaky_relu(x0) if self.mode == 'tf': x1 = self.conv_1(tf_same_padding(x0)) else: x1 = self.conv_1(x0) x1 = self.leaky_relu(x1) x1 = self.conv_2(x1) x1 = self.leaky_relu(x1) if self.mode == 'tf': x2 = self.conv_3(tf_same_padding(x1)) else: x2 = self.conv_3(x1) x2 = self.leaky_relu(x2) x2 = self.conv_4(x2) x2 = self.leaky_relu(x2) x2 = self.block_3(self.block_2(self.block_1(self.block_0(x2)))) x2 = self.conv_5(x2) x2 = self.leaky_relu(x2) x3 = self.upsample(x2) x3 = self.conv_6(x3 + x1) x3 = self.leaky_relu(x3) x3 = self.conv_7(x3) x3 = self.leaky_relu(x3) x4 = self.upsample(x3) x4 = self.conv_8(x4 + x0) x4 = self.leaky_relu(x4) x4 = self.conv_9(x4) return x4 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_7(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_8(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 15, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_10( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x6 = xindex // 1024 x2 = xindex // 1024 % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr9 + x4, None) tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = 
tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.2 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tmp51 = tmp49 + tmp50 tl.store(in_out_ptr1 + x4, tmp51, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def 
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x6 = xindex // 4096 x2 = xindex // 4096 % 32 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr9 + x4, None) tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.2 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tmp51 = tmp49 + tmp50 tl.store(in_out_ptr1 + x4, tmp51, None) @triton.jit def triton_poi_fused_convolution_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37) = args args.clear() 
assert_size_stride(primals_1, (32, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_29, (64,), (1,)) assert_size_stride(primals_30, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_31, (64,), (1,)) assert_size_stride(primals_32, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_33, (32,), (1,)) assert_size_stride(primals_34, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_35, (32,), (1,)) assert_size_stride(primals_36, (3, 32, 7, 7), (1568, 49, 7, 1)) assert_size_stride(primals_37, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0, primals_2, buf1, buf2, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, buf4, buf5, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf7 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) 
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf6, primals_7, buf7, buf8, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 16, 16), (16384, 256, 16, 1)) buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_3[grid(65536)](buf9, primals_9, buf10, buf11, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf9 del primals_9 buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 16, 16), (32768, 256, 16, 1)) buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf14 = reinterpret_tensor(buf3, (4, 128, 16, 16), (32768, 256, 16, 1), 0) del buf3 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf12, primals_11, buf13, buf14, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf15 = extern_kernels.convolution(buf14, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 16, 16), (32768, 256, 16, 1)) buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf17 = buf12 del buf12 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf15, primals_13, buf16, buf17, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 128, 16, 16), (32768, 256, 16, 1)) buf19 = buf18 del buf18 triton_poi_fused_add_convolution_5[grid(131072)](buf19, primals_15, buf14, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 16, 16), (32768, 256, 16, 1)) buf21 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf22 = buf15 del buf15 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf20, primals_17, buf21, buf22, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf23 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 16, 16), (32768, 256, 16, 1)) buf24 = buf23 del buf23 triton_poi_fused_add_convolution_5[grid(131072)](buf24, primals_19, buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf25 = extern_kernels.convolution(buf24, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 128, 16, 16), (32768, 256, 16, 1)) buf26 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf27 = buf20 del buf20 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf25, primals_21, buf26, buf27, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf28 = extern_kernels.convolution(buf27, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 
1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 16, 16), (32768, 256, 16, 1)) buf29 = buf28 del buf28 triton_poi_fused_add_convolution_5[grid(131072)](buf29, primals_23, buf24, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 128, 16, 16), (32768, 256, 16, 1)) buf31 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf32 = buf25 del buf25 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf30, primals_25, buf31, buf32, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del buf30 del primals_25 buf33 = extern_kernels.convolution(buf32, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 128, 16, 16), (32768, 256, 16, 1)) buf34 = buf33 del buf33 triton_poi_fused_add_convolution_5[grid(131072)](buf34, primals_27, buf29, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_27 buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 64, 16, 16), (16384, 256, 16, 1)) buf36 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf35, primals_29, buf36, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf37 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_7[grid(32)](buf37, 32, XBLOCK=32, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_8[grid(32)](buf38, 32, XBLOCK=32, num_warps=1, num_stages=1) buf39 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_7[grid(32)](buf39, 32, XBLOCK=32, num_warps=1, num_stages=1) buf40 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_8[grid(32)](buf40, 32, XBLOCK=32, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9[grid(32)](buf43, 32, XBLOCK=32, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9[grid(32)](buf45, 32, XBLOCK=32, num_warps=1, num_stages=1) buf42 = buf6 del buf6 buf46 = buf42 del buf42 buf47 = buf46 del buf46 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_10[ grid(262144)](buf47, buf38, buf39, buf36, buf35, primals_29, buf37, buf40, buf43, buf45, buf8, 262144, XBLOCK=512, num_warps =8, num_stages=1) del buf35 del primals_29 buf48 = extern_kernels.convolution(buf47, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf49 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf50 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf48, primals_31, buf49, buf50, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf48 del primals_31 buf51 = extern_kernels.convolution(buf50, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf51, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf52 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_11[grid(131072)](buf51, primals_33, buf52, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf53 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_12[grid(64)](buf53, 64, XBLOCK=64, num_warps=1, num_stages=1) buf54 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_13[grid(64)](buf54, 64, XBLOCK=64, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_12[grid(64)](buf55, 64, XBLOCK=64, num_warps=1, num_stages=1) buf56 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_13[grid(64)](buf56, 64, XBLOCK=64, num_warps=1, num_stages=1) buf59 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf59, 64, XBLOCK=64, num_warps=1, num_stages=1) buf61 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf61, 64, XBLOCK=64, num_warps=1, num_stages=1) buf58 = buf0 del buf0 buf62 = buf58 del buf58 buf63 = buf62 del buf62 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[ grid(524288)](buf63, buf54, buf55, buf52, buf51, primals_33, buf53, buf56, buf59, buf61, buf2, 524288, XBLOCK=512, num_warps =8, num_stages=1) del buf51 del primals_33 buf64 = extern_kernels.convolution(buf63, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf65 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf66 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf64, primals_35, buf65, buf66, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf64 del primals_35 buf67 = extern_kernels.convolution(buf66, primals_36, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf68 = buf67 del buf67 triton_poi_fused_convolution_16[grid(49152)](buf68, primals_37, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_37 return (buf68, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf21, buf22, buf24, buf26, buf27, buf29, buf31, buf32, buf34, buf36, buf37, buf38, buf39, buf40, buf43, buf45, buf47, buf49, buf50, buf52, buf53, buf54, buf55, buf56, buf59, buf61, buf63, buf65, buf66) def tf_2xupsample_bilinear(x): b, c, h, w = x.shape out = torch.zeros(b, c, h * 2, w * 2) out[:, :, ::2, ::2] = x padded = F.pad(x, (0, 1, 0, 1), mode='replicate') out[:, :, 1::2, ::2] = (padded[:, :, :-1, :-1] + padded[:, :, 1:, :-1]) / 2 out[:, :, ::2, 1::2] = (padded[:, :, :-1, :-1] + padded[:, :, :-1, 1:]) / 2 out[:, :, 1::2, 1::2] = (padded[:, :, :-1, :-1] + padded[:, :, 1:, 1:]) / 2 return out def tf_same_padding(x, k_size=3): j = k_size // 2 return F.pad(x, (j - 1, j, j - 1, j)) class Upsample(nn.Module): """Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) 
data. The input data is assumed to be of the form `minibatch x channels x [optional depth] x [optional height] x width`. Args: size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional): output spatial sizes scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional): multiplier for spatial size. Has to match input size if it is a tuple. mode (str, optional): the upsampling algorithm: one of ``'nearest'``, ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``. Default: ``'nearest'`` align_corners (bool, optional): if ``True``, the corner pixels of the input and output tensors are aligned, and thus preserving the values at those pixels. This only has effect when :attr:`mode` is ``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False`` """ def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None): super(Upsample, self).__init__() if isinstance(scale_factor, tuple): self.scale_factor = tuple(float(factor) for factor in scale_factor) else: self.scale_factor = float(scale_factor) if scale_factor else None self.mode = mode self.size = size self.align_corners = align_corners def forward(self, x): return nn.functional.interpolate(x, size=self.size, scale_factor= self.scale_factor, mode=self.mode, align_corners=self.align_corners ) def extra_repr(self): if self.scale_factor is not None: info = 'scale_factor=' + str(self.scale_factor) else: info = 'size=' + str(self.size) info += ', mode=' + self.mode return info class ResBlock(nn.Module): def __init__(self, in_nf, out_nf=32, slope=0.2): super().__init__() self.conv1 = nn.Conv2d(in_nf, out_nf, 3, 1, padding=1) self.conv2 = nn.Conv2d(out_nf, out_nf, 3, 1, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=False) def forward(self, inputs): x = self.conv2(self.leaky_relu(self.conv1(inputs))) return x + inputs class Upsample_2xBil_TF(nn.Module): def __init__(self): super(Upsample_2xBil_TF, self).__init__() def forward(self, x): return tf_2xupsample_bilinear(x) class UnetGeneratorWBCNew(nn.Module): """ UNet Generator as used in Learning to Cartoonize Using White-box Cartoon Representations for image to image translation https://systemerrorwang.github.io/White-box-Cartoonization/paper/06791.pdf https://systemerrorwang.github.io/White-box-Cartoonization/paper/06791-supp.pdf """ def __init__(self, nf=32, mode='pt', slope=0.2): super(UnetGeneratorWBCNew, self).__init__() self.mode = mode self.conv = nn.Conv2d(3, nf, 7, 1, padding=3) if mode == 'tf': self.conv_1 = nn.Conv2d(nf, nf, 3, stride=2, padding=0) else: self.conv_1 = nn.Conv2d(nf, nf, 3, stride=2, padding=1) self.conv_2 = nn.Conv2d(nf, nf * 2, 3, 1, padding=1) if mode == 'tf': self.conv_3 = nn.Conv2d(nf * 2, nf * 2, 3, stride=2, padding=0) else: self.conv_3 = nn.Conv2d(nf * 2, nf * 2, 3, stride=2, padding=1) self.conv_4 = nn.Conv2d(nf * 2, nf * 4, 3, 1, padding=1) self.block_0 = ResBlock(nf * 4, nf * 4, slope=slope) self.block_1 = ResBlock(nf * 4, nf * 4, slope=slope) self.block_2 = ResBlock(nf * 4, nf * 4, slope=slope) self.block_3 = ResBlock(nf * 4, nf * 4, slope=slope) self.conv_5 = nn.Conv2d(nf * 4, nf * 2, 3, 1, padding=1) self.conv_6 = nn.Conv2d(nf * 2, nf * 2, 3, 1, padding=1) self.conv_7 = nn.Conv2d(nf * 2, nf, 3, 1, padding=1) self.conv_8 = nn.Conv2d(nf, nf, 3, 1, padding=1) self.conv_9 = nn.Conv2d(nf, 3, 7, 1, padding=3) self.leaky_relu = nn.LeakyReLU(negative_slope=slope, inplace=False) if mode == 'tf': self.upsample = Upsample_2xBil_TF() else: self.upsample = 
Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.conv_1.weight primals_5 = self.conv_1.bias primals_6 = self.conv_2.weight primals_7 = self.conv_2.bias primals_8 = self.conv_3.weight primals_9 = self.conv_3.bias primals_10 = self.conv_4.weight primals_11 = self.conv_4.bias primals_12 = self.block_0.conv1.weight primals_13 = self.block_0.conv1.bias primals_14 = self.block_0.conv2.weight primals_15 = self.block_0.conv2.bias primals_16 = self.block_1.conv1.weight primals_17 = self.block_1.conv1.bias primals_18 = self.block_1.conv2.weight primals_19 = self.block_1.conv2.bias primals_20 = self.block_2.conv1.weight primals_21 = self.block_2.conv1.bias primals_22 = self.block_2.conv2.weight primals_23 = self.block_2.conv2.bias primals_24 = self.block_3.conv1.weight primals_25 = self.block_3.conv1.bias primals_26 = self.block_3.conv2.weight primals_27 = self.block_3.conv2.bias primals_28 = self.conv_5.weight primals_29 = self.conv_5.bias primals_30 = self.conv_6.weight primals_31 = self.conv_6.bias primals_32 = self.conv_7.weight primals_33 = self.conv_7.bias primals_34 = self.conv_8.weight primals_35 = self.conv_8.bias primals_36 = self.conv_9.weight primals_37 = self.conv_9.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37]) return output[0]
grofit/traiNNer
UnetGeneratorWBC
false
15,555
[ "Apache-2.0" ]
78
12d006fd44ed304e4178839c53b1f3d95ca25dcb
https://github.com/grofit/traiNNer/tree/12d006fd44ed304e4178839c53b1f3d95ca25dcb
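The repeated triton_poi_fused_convolution_leaky_relu_* kernels in the UnetGeneratorWBC record above fold the convolution bias add and LeakyReLU(negative_slope=0.2) into one pass, and also emit the boolean y > 0 mask that LeakyReLU's backward pass reuses. A minimal sketch of the same arithmetic in plain PyTorch (tensor shapes here are hypothetical):

import torch
import torch.nn.functional as F

conv_out = torch.randn(4, 32, 8, 8)           # convolution output before the bias add
bias = torch.randn(32).view(1, -1, 1, 1)

y = conv_out + bias
mask = y > 0                                  # mask the fused kernels also store
activated = torch.where(mask, y, 0.2 * y)     # LeakyReLU with negative_slope=0.2
assert torch.allclose(activated, F.leaky_relu(y, negative_slope=0.2))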
IOUloss
import torch import torch.nn as nn import torch.utils.data class IOUloss(nn.Module): def __init__(self, reduction='none', loss_type='iou'): super(IOUloss, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, pred, target): assert pred.shape[0] == target.shape[0] pred = pred.view(-1, 4) target = target.view(-1, 4) tl = torch.max(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2) br = torch.min(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2) area_p = torch.prod(pred[:, 2:], 1) area_g = torch.prod(target[:, 2:], 1) en = (tl < br).type(tl.type()).prod(dim=1) area_i = torch.prod(br - tl, 1) * en iou = area_i / (area_p + area_g - area_i + 1e-16) if self.loss_type == 'iou': loss = 1 - iou ** 2 elif self.loss_type == 'giou': c_tl = torch.min(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2) c_br = torch.max(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2) area_c = torch.prod(c_br - c_tl, 1) giou = iou - (area_c - area_i) / area_c.clamp(1e-16) loss = 1 - giou.clamp(min=-1.0, max=1.0) if self.reduction == 'mean': loss = loss.mean() elif self.reduction == 'sum': loss = loss.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp6 * tmp2 tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.minimum(tmp4, tmp8) tmp10 = tmp0 - tmp3 tmp11 = tmp5 - tmp7 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp9 - tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tmp18 + tmp20 tmp22 = triton_helpers.minimum(tmp17, tmp21) tmp23 = tmp14 - tmp16 tmp24 = tmp18 - tmp20 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tmp22 - tmp25 tmp27 = tmp13 * tmp26 tmp28 = tmp12 < tmp9 tmp29 = tmp28.to(tl.float32) tmp30 = tmp25 < tmp22 tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 * tmp31 tmp33 = tmp27 * tmp32 tmp34 = tmp1 * tmp15 tmp35 = tmp6 * tmp19 tmp36 = tmp34 + tmp35 tmp37 = tmp36 - tmp33 tmp38 = 1e-16 tmp39 = tmp37 + tmp38 tmp40 = tmp33 / tmp39 tmp41 = tmp40 * tmp40 tmp42 = 1.0 tmp43 = tmp42 - tmp41 tl.store(in_out_ptr0 + x0, tmp43, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0[ grid(64)](buf1, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class IOUlossNew(nn.Module): def __init__(self, reduction='none', loss_type='iou'): super(IOUlossNew, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hyperfraise/ByteTrack
IOUloss
false
15,556
[ "MIT" ]
1,039
1039
d742a3321c14a7412f024f2218142c7441c1b699
https://github.com/hyperfraise/ByteTrack/tree/d742a3321c14a7412f024f2218142c7441c1b699
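The single fused kernel in the IOUloss record above corresponds to the module's default loss_type='iou', reduction='none' path: boxes are interpreted as (cx, cy, w, h), the intersection is zeroed via the (tl < br) indicator, and the per-box loss is 1 - IoU**2. A small worked example of that computation on two concrete boxes:

import torch

# Two unit boxes offset by half a unit along x: intersection 0.5, union 1.5,
# so IoU = 1/3 and the 'iou' loss is 1 - (1/3)**2 = 8/9.
pred = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
target = torch.tensor([[0.5, 0.0, 1.0, 1.0]])

tl = torch.max(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2)
br = torch.min(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2)
en = (tl < br).all(dim=1).float()             # 1 only when the boxes actually overlap
area_i = torch.prod(br - tl, 1) * en
area_p = torch.prod(pred[:, 2:], 1)
area_g = torch.prod(target[:, 2:], 1)
iou = area_i / (area_p + area_g - area_i + 1e-16)
print(iou.item(), (1 - iou ** 2).item())      # ~0.3333, ~0.8889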
MyBCEWithLogitsLoss
import torch import torch.utils.data import torch import torch.nn as nn class MyBCEWithLogitsLoss(nn.Module): def __init__(self): nn.Module.__init__(self) self.m = nn.BCEWithLogitsLoss() def forward(self, positives, negatives): values = torch.cat((positives, negatives), dim=-1) labels = torch.cat((positives.new_ones(positives.size()), negatives .new_zeros(negatives.size())), dim=-1) return self.m(values, labels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_cat_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 8 r1 = rindex // 8 tmp0 = r0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 1.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp11 = 0.0 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp8, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp7, tmp13) tmp15 = tmp5 - tmp14 tmp16 = tl.load(in_ptr0 + tl.broadcast_to(4 * r1 + r0, [RBLOCK]), tmp4, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + tl.broadcast_to(4 * r1 + (-4 + r0), [RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp15 * tmp18 tmp20 = triton_helpers.minimum(tmp11, tmp18) tmp21 = tl_math.abs(tmp18) tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = libdevice.log1p(tmp23) tmp25 = tmp20 - tmp24 tmp26 = tmp19 - tmp25 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = 512.0 tmp31 = tmp29 / tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_cat_0[grid(1)](buf1, arg0_1, arg1_1, 1, 512, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf1, class MyBCEWithLogitsLossNew(nn.Module): def __init__(self): nn.Module.__init__(self) self.m = nn.BCEWithLogitsLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
huoxusg/ScenarioMeta
MyBCEWithLogitsLoss
false
15,557
[ "MIT" ]
79
ce753da45a3d46ac08961ffc71b2131ae3f7e551
https://github.com/huoxusg/ScenarioMeta/tree/ce753da45a3d46ac08961ffc71b2131ae3f7e551
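The fused kernel in the MyBCEWithLogitsLoss record above evaluates binary cross-entropy with logits in its numerically stable form, (1 - y) * x - (min(x, 0) - log1p(exp(-|x|))), instead of taking the log of a sigmoid. A short check that this matches nn.BCEWithLogitsLoss (random labels are used here only to exercise the identity, not to reproduce the module's concatenated positives/negatives):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 8)                   # logits
y = torch.randint(0, 2, x.shape).float()

stable = (1 - y) * x - (torch.clamp(x, max=0) - torch.log1p(torch.exp(-x.abs())))
assert torch.allclose(stable.mean(), F.binary_cross_entropy_with_logits(x, y))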
LayerNorm
import torch from torch import nn class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) out = (x - mean) / (std + self.eps) out = self.gamma * out + self.beta return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-12 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, d_model, eps=1e-12): super(LayerNormNew, self).__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hyunwoongko/transformer
LayerNorm
false
15,558
[ "Apache-2.0" ]
233
8f7aaa19d37b088c156db0512868127ba9bf1a0f
https://github.com/hyunwoongko/transformer/tree/8f7aaa19d37b088c156db0512868127ba9bf1a0f
KeyValue
import torch import torch.nn import torch.utils.data.dataset class KeyValue(torch.nn.Module): def __init__(self, indim, keydim, valdim): super(KeyValue, self).__init__() self.key_conv = torch.nn.Conv2d(indim, keydim, kernel_size=3, padding=1, stride=1) self.value_conv = torch.nn.Conv2d(indim, valdim, kernel_size=3, padding=1, stride=1) def forward(self, x): return self.key_conv(x), self.value_conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'indim': 4, 'keydim': 4, 'valdim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn import torch.utils.data.dataset assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf1, buf3, primals_1, primals_3, primals_4 class KeyValueNew(torch.nn.Module): def __init__(self, indim, keydim, valdim): super(KeyValueNew, self).__init__() self.key_conv = torch.nn.Conv2d(indim, keydim, kernel_size=3, padding=1, stride=1) self.value_conv = torch.nn.Conv2d(indim, valdim, kernel_size=3, padding=1, stride=1) def forward(self, input_0): primals_1 = self.key_conv.weight primals_2 = self.key_conv.bias primals_4 = self.value_conv.weight primals_5 = self.value_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
hzxie/RMNet
KeyValue
false
15,559
[ "MIT" ]
66
32a16f9c9473463a41dd6e95f72b06dd830fc1eb
https://github.com/hzxie/RMNet/tree/32a16f9c9473463a41dd6e95f72b06dd830fc1eb
Self_Attn
import torch from torch import nn class Self_Attn(nn.Module): """ Self attention Layer""" def __init__(self, in_dim, activation): super(Self_Attn, self).__init__() self.chanel_in = in_dim self.activation = activation if in_dim >= 8: self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim // 8, kernel_size=1, bias=False) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim // 8, kernel_size=1, bias=False) else: self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim, kernel_size=1, bias=False) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim, kernel_size=1, bias=False) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1, bias=False) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) def forward(self, x): """ inputs : x : input feature maps( B X C X W X H) returns : out : self attention value + input feature attention: B X N X N (N is Width*Height) """ m_batchsize, C, width, height = x.size() proj_query = self.query_conv(x).view(m_batchsize, -1, width * height ).permute(0, 2, 1) proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) energy = torch.bmm(proj_query, proj_key) attention = self.softmax(energy) proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = self.gamma * out + x return out, attention def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'activation': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0), out=buf2) buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(64)](buf2, buf5, 64, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf2 buf6 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf5, (4, 16, 16), (256, 1, 16), 0), out =buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(256)](primals_5, buf7, primals_1, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) return (buf8, buf5, primals_1, primals_2, primals_3, primals_4, primals_5, buf5, buf7, reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16), 0)) class Self_AttnNew(nn.Module): """ Self attention Layer""" def __init__(self, in_dim, activation): super(Self_AttnNew, self).__init__() self.chanel_in = in_dim self.activation = activation if in_dim >= 8: self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim // 8, kernel_size=1, bias=False) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim // 8, kernel_size=1, bias=False) else: self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim, kernel_size=1, bias=False) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels= in_dim, kernel_size=1, bias=False) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1, bias=False) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0): primals_5 = self.gamma primals_2 = self.query_conv.weight primals_3 = self.key_conv.weight primals_4 = self.value_conv.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
hugovk/EnAET
Self_Attn
false
15,560
[ "MIT" ]
87
596a1be95f4ebfc5fc4f372f251e66fb03e23b5a
https://github.com/hugovk/EnAET/tree/596a1be95f4ebfc5fc4f372f251e66fb03e23b5a
QueryEncoder
import torch from torch import nn import torch.nn.functional as F class QueryEncoder(nn.Module): def __init__(self, input_size): super(QueryEncoder, self).__init__() self.fc1 = nn.Linear(input_size, 16) self.fc2 = nn.Linear(16, 10) self.fc3 = nn.Linear(10, 8) def forward(self, x): out = F.relu(self.fc1(x)) out = F.relu(self.fc2(out)) out = F.relu(self.fc3(out)) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 16), (16, 1)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (8, 10), (10, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf8, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 10), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(640)](buf3, primals_5, buf7, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 8), (1, 10), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(512)](buf5, primals_7, buf6, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor( buf3, (64, 10), (10, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class QueryEncoderNew(nn.Module): def __init__(self, input_size): super(QueryEncoderNew, self).__init__() self.fc1 = nn.Linear(input_size, 16) self.fc2 = nn.Linear(16, 10) self.fc3 = nn.Linear(10, 8) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
huyi-work/UnifiedEmbeddingModel
QueryEncoder
false
15,561
[ "MIT" ]
50
85c8442122213d1f1b1027df0fd34f428259aaa4
https://github.com/huyi-work/UnifiedEmbeddingModel/tree/85c8442122213d1f1b1027df0fd34f428259aaa4
MyHingeLoss
import torch import torch.utils.data import torch import torch.nn as nn class MyHingeLoss(nn.Module): def __init__(self, margin=0.0): nn.Module.__init__(self) self.m = nn.MarginRankingLoss(margin=margin) def forward(self, positives, negatives): labels = positives.new_ones(positives.size()) return self.m(positives, negatives, labels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_min_mean_mul_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = -1.0 tmp4 = tmp3 * tmp2 tmp5 = 0.0 tmp6 = tmp4 + tmp5 tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_min_mean_mul_neg_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MyHingeLossNew(nn.Module): def __init__(self, margin=0.0): nn.Module.__init__(self) self.m = nn.MarginRankingLoss(margin=margin) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
huoxusg/ScenarioMeta
MyHingeLoss
false
15,562
[ "MIT" ]
79
ce753da45a3d46ac08961ffc71b2131ae3f7e551
https://github.com/huoxusg/ScenarioMeta/tree/ce753da45a3d46ac08961ffc71b2131ae3f7e551
DocumentEncoder
import torch from torch import nn import torch.nn.functional as F class DocumentEncoder(nn.Module): def __init__(self, input_size, hidden_layer_sizes=(100,), activation=( 'relu',), solver='adam'): super(DocumentEncoder, self).__init__() self.fc1 = nn.Linear(input_size, 12) self.fc2 = nn.Linear(12, 8) def forward(self, x): out = F.relu(self.fc1(x)) out = F.relu(self.fc2(out)) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 12), (12, 1)) assert_size_stride(primals_5, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(768)](buf1, primals_2, buf5, 768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(primals_4, (12, 8), (1, 12), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(512)](buf3, primals_5, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 12), (12, 1), 0 ), buf4, primals_4, buf5 class DocumentEncoderNew(nn.Module): def __init__(self, input_size, hidden_layer_sizes=(100,), activation=( 'relu',), solver='adam'): super(DocumentEncoderNew, self).__init__() self.fc1 = nn.Linear(input_size, 12) self.fc2 = nn.Linear(12, 8) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
huyi-work/UnifiedEmbeddingModel
DocumentEncoder
false
15,563
[ "MIT" ]
50
85c8442122213d1f1b1027df0fd34f428259aaa4
https://github.com/huyi-work/UnifiedEmbeddingModel/tree/85c8442122213d1f1b1027df0fd34f428259aaa4
PositionalEncodingImageBoxes
import torch from torch import nn as nn import torch.nn.init from torchvision import models as models class PositionalEncodingImageBoxes(nn.Module): def __init__(self, d_model, mode='project-and-sum'): super().__init__() self.mode = mode if mode == 'project-and-sum': self.map = nn.Linear(5, d_model) elif mode == 'concat-and-process': self.map = nn.Sequential(nn.Linear(d_model + 5, d_model), nn. ReLU(), nn.Linear(d_model, d_model)) def forward(self, x, boxes): bs = x.shape[1] area = (boxes[:, :, 2] - boxes[:, :, 0]) * (boxes[:, :, 3] - boxes[ :, :, 1]) area = area.unsqueeze(2) s_infos = torch.cat([boxes, area], dim=2) if self.mode == 'project-and-sum': ct = self.map(s_infos).permute(1, 0, 2) x = x + ct.expand(-1, bs, -1) elif self.mode == 'concat-and-process': x = torch.cat([x, s_infos.permute(1, 0, 2)], dim=2) x = self.map(x) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn import torch.nn.init from torchvision import models as models assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.load(in_ptr0 + 4 * x1, tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp13 = tl.load(in_ptr0 + (1 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 - tmp13 tmp15 = tmp11 * tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp6, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp5, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1), xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 5), (5, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(80)](primals_2, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 5), (5, 1), 0), reinterpret_tensor(primals_3, (5, 4), (1, 5), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_1[grid(64)](primals_1, buf1, primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_1 del primals_4 return buf2, reinterpret_tensor(buf0, (16, 5), (5, 1), 0) class PositionalEncodingImageBoxesNew(nn.Module): def __init__(self, d_model, mode='project-and-sum'): super().__init__() self.mode = mode if mode == 'project-and-sum': self.map = nn.Linear(5, d_model) elif mode == 'concat-and-process': self.map = nn.Sequential(nn.Linear(d_model + 5, d_model), nn. ReLU(), nn.Linear(d_model, d_model)) def forward(self, input_0, input_1): primals_3 = self.map.weight primals_4 = self.map.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
huylb314/TERAN
PositionalEncodingImageBoxes
false
15,564
[ "Apache-2.0" ]
46
f6a380db423e75fcdaa6ef44f1a79d293a38efba
https://github.com/huylb314/TERAN/tree/f6a380db423e75fcdaa6ef44f1a79d293a38efba
AdaptiveConcatPool3d
import torch import torch.nn as nn import torch.nn.functional as F class AdaptiveConcatPool3d(nn.Module): def forward(self, x): return torch.cat((F.adaptive_avg_pool3d(x, 1), F. adaptive_max_pool3d(x, 1)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr1 + 2 * x0, tmp6, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + 2 * x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.adaptive_max_pool3d.default(arg0_1, [1, 1, 1]) buf1 = buf0[0] del buf0 buf6 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 1, 1), torch.float32) buf4 = reinterpret_tensor(buf6, (4, 1, 1, 1), (2, 1, 1, 1), 0) get_raw_stream(0) triton_per_fused_mean_0[grid(4)](arg0_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf5 = reinterpret_tensor(buf6, (4, 1, 1, 1), (2, 1, 1, 1), 1) triton_poi_fused_cat_1[grid(4)](buf1, buf5, 4, XBLOCK=4, num_warps= 1, num_stages=1) del buf1 return buf6, class AdaptiveConcatPool3dNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
i-pan/kaggle-melanoma
AdaptiveConcatPool3d
false
15,565
[ "MIT" ]
68
caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
https://github.com/i-pan/kaggle-melanoma/tree/caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
MultiHeadAttention
import math import torch from torch import nn class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax(dim=-1) def forward(self, q, k, v, mask=None, e=1e-12): _batch_size, _head, _length, d_tensor = k.size() k_t = k.transpose(2, 3) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = self.split(q), self.split(k), self.split(v) out, _attention = self.attention(q, k, v, mask=mask) out = self.concat(out) out = self.w_concat(out) return out def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, length, self.n_head, d_tensor ).transpose(1, 2) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.transpose(1, 2).contiguous().view(batch_size, length, d_model) return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'n_head': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10 class ScaleDotProductAttention(nn.Module): """ compute scale dot product attention Query : given sentence that we focused on (decoder) Key : every sentence to check relationship with Qeury(encoder) Value : every sentence same with Key (encoder) """ def __init__(self): super(ScaleDotProductAttention, self).__init__() self.softmax = nn.Softmax(dim=-1) def forward(self, q, k, v, mask=None, e=1e-12): _batch_size, _head, _length, d_tensor = k.size() k_t = k.transpose(2, 3) score = q @ k_t / math.sqrt(d_tensor) if mask is not None: score = score.masked_fill(mask == 0, -e) score = self.softmax(score) v = score @ v return v, score class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, n_head): super(MultiHeadAttentionNew, self).__init__() self.n_head = n_head self.attention = ScaleDotProductAttention() self.w_q = nn.Linear(d_model, d_model) self.w_k = nn.Linear(d_model, d_model) self.w_v = nn.Linear(d_model, d_model) self.w_concat = nn.Linear(d_model, d_model) def split(self, tensor): """ split tensor by number of head :param tensor: [batch_size, length, d_model] :return: [batch_size, head, length, d_tensor] """ batch_size, length, d_model = tensor.size() d_tensor = d_model // self.n_head tensor = tensor.view(batch_size, length, self.n_head, d_tensor ).transpose(1, 2) return tensor def concat(self, tensor): """ inverse function of self.split(tensor : torch.Tensor) :param tensor: [batch_size, head, length, d_tensor] :return: [batch_size, length, d_model] """ batch_size, head, length, d_tensor = tensor.size() d_model = head * d_tensor tensor = tensor.transpose(1, 2).contiguous().view(batch_size, length, d_model) return tensor def forward(self, input_0, input_1, input_2): primals_1 = self.w_q.weight primals_2 = self.w_q.bias primals_4 = self.w_k.weight primals_5 = self.w_k.bias primals_7 = self.w_v.weight primals_8 = self.w_v.bias primals_10 = self.w_concat.weight primals_11 = self.w_concat.bias primals_3 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
hyunwoongko/transformer
MultiHeadAttention
false
15,566
[ "Apache-2.0" ]
233
8f7aaa19d37b088c156db0512868127ba9bf1a0f
https://github.com/hyunwoongko/transformer/tree/8f7aaa19d37b088c156db0512868127ba9bf1a0f
ChannelAttentionBlock
import torch import torch.nn as nn class ChannelAttentionBlock(nn.Module): def __init__(self, in_channels): super(ChannelAttentionBlock, self).__init__() self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) def forward(self, x): """ :param x: input( B x C x H x W ) :return: affinity value + x """ B, C, H, W = x.size() proj_query = x.view(B, C, -1) proj_key = x.view(B, C, -1).permute(0, 2, 1) affinity = torch.matmul(proj_query, proj_key) affinity_new = torch.max(affinity, -1, keepdim=True)[0].expand_as( affinity) - affinity affinity_new = self.softmax(affinity_new) proj_value = x.view(B, C, -1) weights = torch.matmul(affinity_new, proj_value) weights = weights.view(B, C, H, W) out = self.gamma * weights + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_2, buf4, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf5, buf4 class ChannelAttentionBlockNew(nn.Module): def __init__(self, in_channels): super(ChannelAttentionBlockNew, self).__init__() self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0): primals_2 = self.gamma primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
iMED-Lab/ROSE
ChannelAttentionBlock
false
15,567
[ "Apache-2.0" ]
64
8d99a2a06fc645410b1d388193b3148404e61230
https://github.com/iMED-Lab/ROSE/tree/8d99a2a06fc645410b1d388193b3148404e61230
focal_loss
import torch import torch.nn as nn def clip_by_tensor(t, t_min, t_max): """ clip_by_tensor :param t: tensor :param t_min: min :param t_max: max :return: cliped tensor """ t = t.float() result = (t >= t_min).float() * t + (t < t_min).float() * t_min result = (result <= t_max).float() * result + (result > t_max).float( ) * t_max return result class focal_loss(nn.Module): def __init__(self, alpha=0.25, gamma=2.0, size_average=True): super(focal_loss, self).__init__() self.alpha = alpha self.gamma = gamma self.size_average = size_average def forward(self, pred, gt): gt_oh = torch.cat((gt, 1.0 - gt), dim=1) pt = (gt_oh * pred).sum(1) focal_map = -self.alpha * torch.pow(1.0 - pt, self.gamma) * torch.log2( clip_by_tensor(pt, 1e-12, 1.0)) if self.size_average: loss = focal_map.mean() else: loss = focal_map.sum() return loss def get_inputs(): return [torch.rand([4, 8, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_cat_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp15 = tl.load(in_ptr1 + (x0 + 16 * r2 + 128 * x1), xmask, other=0.0) tmp0 = r2 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + r2) + 64 * x1), tmp6 & xmask, other=0.0) tmp10 = 1.0 tmp11 = tmp10 - tmp9 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tmp16 = tmp14 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr0 + x3, tmp20, xmask) @triton.jit def triton_per_fused__to_copy_add_ge_gt_le_log2_lt_mean_mul_pow_rsub_1( in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp3 = tmp2 * tmp2 tmp4 = -0.25 tmp5 = tmp3 * tmp4 tmp6 = 1e-12 tmp7 = tmp0 >= tmp6 tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 * tmp0 tmp10 = tmp0 < tmp6 tmp11 = tmp10.to(tl.float32) tmp12 = tmp11 * tmp6 tmp13 = tmp9 + tmp12 tmp14 = tmp13 <= tmp1 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 * tmp13 tmp17 = tmp13 > tmp1 tmp18 = tmp17.to(tl.float32) tmp19 = tmp18 * tmp1 tmp20 = tmp16 + tmp19 tmp21 = libdevice.log2(tmp20) tmp22 = tmp5 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 8, 4, 4), (128, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, 8, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__to_copy_add_ge_gt_le_log2_lt_mean_mul_pow_rsub_1[grid (1)](buf2, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf2, def clip_by_tensor(t, t_min, t_max): """ clip_by_tensor :param t: tensor :param t_min: min :param t_max: max :return: cliped tensor """ t = t.float() result = (t >= t_min).float() * t + (t < t_min).float() * t_min result = (result <= t_max).float() * result + (result > t_max).float( ) * t_max return result class focal_lossNew(nn.Module): def __init__(self, alpha=0.25, gamma=2.0, size_average=True): super(focal_lossNew, self).__init__() self.alpha = alpha self.gamma = gamma self.size_average = size_average def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
iMED-Lab/ROSE
focal_loss
false
15,568
[ "Apache-2.0" ]
64
8d99a2a06fc645410b1d388193b3148404e61230
https://github.com/iMED-Lab/ROSE/tree/8d99a2a06fc645410b1d388193b3148404e61230
DenseCrossEntropy
import torch import torch.nn as nn class DenseCrossEntropy(nn.Module): def forward(self, x, target): x = x.float() target = target.float() logprobs = torch.nn.functional.log_softmax(x, dim=-1) loss = -logprobs * target loss = loss.sum(-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class DenseCrossEntropyNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
i-pan/kaggle-melanoma
DenseCrossEntropy
false
15,569
[ "MIT" ]
68
caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
https://github.com/i-pan/kaggle-melanoma/tree/caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
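The two code cells in a row like this one are meant to agree numerically on the shapes produced by get_inputs(). Below is a minimal cross-check sketch: the eager reference formula is reconstructed from the fused kernels above (per-row sum of -target * log_softmax(logits), averaged over the 64 rows), not copied from the repository, and it assumes the row's DenseCrossEntropyNew class has already been executed on a machine with CUDA, since call() launches Triton kernels.

import torch
import torch.nn.functional as F


def reference_dense_ce(logits, targets):
    # Hypothetical reference reconstructed from the kernels above: log-softmax
    # over the last dimension, weighted by the targets, summed per row and
    # averaged over all 4 * 4 * 4 = 64 rows (the final division by 64.0).
    return (-(targets * F.log_softmax(logits, dim=-1)).sum(dim=-1)).mean()


if torch.cuda.is_available():
    logits = torch.rand(4, 4, 4, 4, device='cuda')
    targets = torch.rand(4, 4, 4, 4, device='cuda')
    fused = DenseCrossEntropyNew()(logits, targets)  # assumed in scope from this row
    print(torch.allclose(fused, reference_dense_ce(logits, targets), atol=1e-05))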
MemoryReader
import math
import torch
import torch.nn
import torch.nn.functional as F
import torch.utils.data.dataset


class MemoryReader(torch.nn.Module):

    def __init__(self):
        super(MemoryReader, self).__init__()

    def forward(self, m_key, m_val, q_key, q_val):
        B, D_e, T, H, W = m_key.size()
        _, D_o, _, _, _ = m_val.size()
        mi = m_key.view(B, D_e, T * H * W)
        mi = torch.transpose(mi, 1, 2)
        qi = q_key.view(B, D_e, H * W)
        p = torch.bmm(mi, qi)
        p = p / math.sqrt(D_e)
        p = F.softmax(p, dim=1)
        mo = m_val.view(B, D_o, T * H * W)
        mem = torch.bmm(mo, p)
        mem = mem.view(B, D_o, H, W)
        mem_val = torch.cat([mem, q_val], dim=1)
        return mem_val, p


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn import torch.utils.data.dataset assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 16 x2 = xindex // 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), None, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr1 + (x0 + 16 * x2), None, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = 0.5 tmp6 = tmp4 * tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64, 16), (1024, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 64, 4), (256, 1, 64), 0), reinterpret_tensor(arg2_1, (4, 4, 16), (64, 16, 1), 0), out=buf0) del arg0_1 del arg2_1 buf1 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32) buf2 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(64)](buf0, buf1, buf2, 64, 64, XBLOCK=32, num_warps=8, num_stages=1) buf3 = buf0 del buf0 triton_poi_fused__softmax_1[grid(4096)](buf3, buf1, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 4, 64), (256, 64, 1), 0), buf3, out=buf4) del arg1_1 buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_2[grid(512)](buf4, arg3_1, buf5, 512, XBLOCK= 256, num_warps=4, num_stages=1) del arg3_1 del buf4 return buf5, buf3 class MemoryReaderNew(torch.nn.Module): def __init__(self): super(MemoryReaderNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
hzxie/RMNet
MemoryReader
false
15,570
[ "MIT" ]
66
32a16f9c9473463a41dd6e95f72b06dd830fc1eb
https://github.com/hzxie/RMNet/tree/32a16f9c9473463a41dd6e95f72b06dd830fc1eb
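One detail of the fused softmax above is worth spelling out: the eager module divides the attention logits by sqrt(D_e) before the softmax, while the kernel first subtracts the row maximum of the raw logits and only then multiplies by the inlined constant 0.5 (1 / sqrt(D_e) for the D_e = 4 used by get_inputs()). The two orderings agree because a constant shift inside a softmax cancels. A small CPU-only sketch of that identity, with shapes matching the (B, T*H*W, H*W) attention map:

import torch
import torch.nn.functional as F

x = torch.randn(4, 64, 16)  # stand-in for the raw bmm(mi, qi) logits
scale = 0.5                 # 1 / sqrt(D_e) with D_e = 4

ref = F.softmax(x * scale, dim=1)  # eager order: scale first, then softmax

# Kernel order: subtract the max of the unscaled logits, scale, exponentiate, normalise.
shifted = (x - x.max(dim=1, keepdim=True).values) * scale
fused_like = shifted.exp() / shifted.exp().sum(dim=1, keepdim=True)

print(torch.allclose(ref, fused_like, atol=1e-06))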
BPRLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


class BPRLoss(nn.Module):

    def __init__(self):
        super(BPRLoss, self).__init__()

    def forward(self, logit):
        """
        Args:
            logit (BxB): Variable that stores the logits for the items in the mini-batch
                The first dimension corresponds to the batches, and the second dimension
                corresponds to sampled number of items to evaluate
        """
        diff = logit.diag().view(-1, 1).expand_as(logit) - logit
        loss = -torch.mean(F.logsigmoid(diff))
        return loss


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log_sigmoid_forward_mean_neg_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 r2 = rindex tmp0 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + r2, None) tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp3, tmp2) tmp5 = tl_math.abs(tmp2) tmp6 = -tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = libdevice.log1p(tmp7) tmp9 = tmp4 - tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 16.0 tmp14 = tmp12 / tmp13 tmp15 = -tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_log_sigmoid_forward_mean_neg_sub_0[grid(1)](buf1, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class BPRLossNew(nn.Module): def __init__(self): super(BPRLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hungthanhpham94/GRU4REC-pytorch
BPRLoss
false
15,571
[ "Apache-2.0" ]
184
666b84264c4afae757fe55c6997dcf0a4da1d44e
https://github.com/hungthanhpham94/GRU4REC-pytorch/tree/666b84264c4afae757fe55c6997dcf0a4da1d44e
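The fused kernel above never calls a log-sigmoid directly; it uses the numerically stable decomposition logsigmoid(d) = min(d, 0) - log1p(exp(-|d|)), which is the minimum/abs/exp/log1p sequence visible in the kernel body. A short CPU-only sketch of that identity, independent of this row:

import torch
import torch.nn.functional as F

d = torch.randn(1000) * 10  # include large magnitudes of both signs
stable = torch.clamp(d, max=0.0) - torch.log1p(torch.exp(-d.abs()))
print(torch.allclose(stable, F.logsigmoid(d), atol=1e-06))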
LossFunction
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F


class BPRLoss(nn.Module):

    def __init__(self):
        super(BPRLoss, self).__init__()

    def forward(self, logit):
        """
        Args:
            logit (BxB): Variable that stores the logits for the items in the mini-batch
                The first dimension corresponds to the batches, and the second dimension
                corresponds to sampled number of items to evaluate
        """
        diff = logit.diag().view(-1, 1).expand_as(logit) - logit
        loss = -torch.mean(F.logsigmoid(diff))
        return loss


class BPR_max(nn.Module):

    def __init__(self):
        super(BPR_max, self).__init__()

    def forward(self, logit):
        logit_softmax = F.softmax(logit, dim=1)
        diff = logit.diag().view(-1, 1).expand_as(logit) - logit
        loss = -torch.log(torch.mean(logit_softmax * torch.sigmoid(diff)))
        return loss


class SampledCrossEntropyLoss(nn.Module):
    """ CrossEntropyLoss with n_classes = batch_size = the number of samples
    in the session-parallel mini-batch """

    def __init__(self, use_cuda):
        """
        Args:
            use_cuda (bool): whether to use cuda or not
        """
        super(SampledCrossEntropyLoss, self).__init__()
        self.xe_loss = nn.CrossEntropyLoss()
        self.use_cuda = use_cuda

    def forward(self, logit):
        batch_size = logit.size(1)
        target = Variable(torch.arange(batch_size).long())
        if self.use_cuda:
            target = target
        return self.xe_loss(logit, target)


class TOP1Loss(nn.Module):

    def __init__(self):
        super(TOP1Loss, self).__init__()

    def forward(self, logit):
        """
        Args:
            logit (BxB): Variable that stores the logits for the items in the mini-batch
                The first dimension corresponds to the batches, and the second dimension
                corresponds to sampled number of items to evaluate
        """
        diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit)
        loss = torch.sigmoid(diff).mean() + torch.sigmoid(logit ** 2).mean()
        return loss


class TOP1_max(nn.Module):

    def __init__(self):
        super(TOP1_max, self).__init__()

    def forward(self, logit):
        logit_softmax = F.softmax(logit, dim=1)
        diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit)
        loss = torch.mean(logit_softmax * (torch.sigmoid(diff) +
            torch.sigmoid(logit ** 2)))
        return loss


class LossFunction(nn.Module):

    def __init__(self, loss_type='TOP1', use_cuda=False):
        """ An abstract loss function that can supports custom loss functions
        compatible with PyTorch."""
        super(LossFunction, self).__init__()
        self.loss_type = loss_type
        self.use_cuda = use_cuda
        if loss_type == 'CrossEntropy':
            self._loss_fn = SampledCrossEntropyLoss(use_cuda)
        elif loss_type == 'TOP1':
            self._loss_fn = TOP1Loss()
        elif loss_type == 'BPR':
            self._loss_fn = BPRLoss()
        elif loss_type == 'TOP1-max':
            self._loss_fn = TOP1_max()
        elif loss_type == 'BPR-max':
            self._loss_fn = BPR_max()
        else:
            raise NotImplementedError

    def forward(self, logit):
        return self._loss_fn(logit)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_neg_pow_sigmoid_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 r2 = rindex tmp0 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + r2, None) tmp2 = tmp0 - tmp1 tmp3 = -tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp1 * tmp1 tmp9 = tl.sigmoid(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 16.0 tmp14 = tmp7 / tmp13 tmp15 = tmp12 / tmp13 tmp16 = tmp14 + tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_neg_pow_sigmoid_sub_0[grid(1)](buf2, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class BPRLoss(nn.Module): def __init__(self): super(BPRLoss, self).__init__() def forward(self, logit): """ Args: logit (BxB): Variable that stores the logits for the items in the mini-batch The first dimension corresponds to the batches, and the second dimension corresponds to sampled number of items to evaluate """ diff = logit.diag().view(-1, 1).expand_as(logit) - logit loss = -torch.mean(F.logsigmoid(diff)) return loss class BPR_max(nn.Module): def __init__(self): super(BPR_max, self).__init__() def forward(self, logit): logit_softmax = F.softmax(logit, dim=1) diff = logit.diag().view(-1, 1).expand_as(logit) - logit loss = -torch.log(torch.mean(logit_softmax * torch.sigmoid(diff))) return loss class SampledCrossEntropyLoss(nn.Module): """ CrossEntropyLoss with n_classes = batch_size = the number of samples in the session-parallel mini-batch """ def __init__(self, use_cuda): """ Args: use_cuda (bool): whether to use cuda or not """ super(SampledCrossEntropyLoss, self).__init__() self.xe_loss = nn.CrossEntropyLoss() self.use_cuda = use_cuda def forward(self, logit): batch_size = logit.size(1) target = Variable(torch.arange(batch_size).long()) if self.use_cuda: target = target return self.xe_loss(logit, target) class TOP1Loss(nn.Module): def __init__(self): super(TOP1Loss, self).__init__() def forward(self, logit): """ Args: logit (BxB): Variable that stores the logits for the items in the mini-batch The first dimension corresponds to the batches, and the second dimension corresponds to sampled number of items to evaluate """ diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit) loss = torch.sigmoid(diff).mean() + torch.sigmoid(logit ** 2).mean() return loss class TOP1_max(nn.Module): def __init__(self): super(TOP1_max, self).__init__() def forward(self, logit): logit_softmax = F.softmax(logit, dim=1) diff = -(logit.diag().view(-1, 
1).expand_as(logit) - logit) loss = torch.mean(logit_softmax * (torch.sigmoid(diff) + torch. sigmoid(logit ** 2))) return loss class LossFunctionNew(nn.Module): def __init__(self, loss_type='TOP1', use_cuda=False): """ An abstract loss function that can supports custom loss functions compatible with PyTorch.""" super(LossFunctionNew, self).__init__() self.loss_type = loss_type self.use_cuda = use_cuda if loss_type == 'CrossEntropy': self._loss_fn = SampledCrossEntropyLoss(use_cuda) elif loss_type == 'TOP1': self._loss_fn = TOP1Loss() elif loss_type == 'BPR': self._loss_fn = BPRLoss() elif loss_type == 'TOP1-max': self._loss_fn = TOP1_max() elif loss_type == 'BPR-max': self._loss_fn = BPR_max() else: raise NotImplementedError def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hungthanhpham94/GRU4REC-pytorch
LossFunction
false
15,572
[ "Apache-2.0" ]
184
666b84264c4afae757fe55c6997dcf0a4da1d44e
https://github.com/hungthanhpham94/GRU4REC-pytorch/tree/666b84264c4afae757fe55c6997dcf0a4da1d44e
GEGLU
import torch
import torch.nn.functional as F
from torch import nn


class GEGLU(nn.Module):

    def forward(self, x):
        x, gates = x.chunk(2, dim=-1)
        return F.gelu(gates) * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GEGLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
idolumbantobing/vit-pytorch
GEGLU
false
15,573
[ "MIT" ]
9,373
eb70d8dca041cc387b3e1f72d965d8814eeab29a
https://github.com/idolumbantobing/vit-pytorch/tree/eb70d8dca041cc387b3e1f72d965d8814eeab29a
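The single fused kernel above reads both halves of the last dimension directly: for a trailing dimension of 4, offsets x0 + 4 * x1 address the first half (the values) and 2 + x0 + 4 * x1 the second half (the gates), which is what chunk(2, dim=-1) produces in the eager module. A small CPU sketch of that slicing equivalence:

import torch
import torch.nn.functional as F

t = torch.randn(4, 4, 4, 4)
x, gates = t.chunk(2, dim=-1)              # eager GEGLU split
x_man, gates_man = t[..., :2], t[..., 2:]  # what the kernel's indexing amounts to
print(torch.equal(x, x_man), torch.equal(gates, gates_man))
print(torch.allclose(F.gelu(gates) * x, F.gelu(gates_man) * x_man))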
BPRLoss
import torch
import torch.utils.data
import torch
import torch.nn as nn


class BPRLoss(nn.Module):

    def __init__(self):
        nn.Module.__init__(self)
        self.m = nn.LogSigmoid()

    def forward(self, positives, negatives):
        return -self.m(positives - negatives).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log_sigmoid_forward_mean_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp3, tmp2) tmp5 = tl_math.abs(tmp2) tmp6 = -tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = libdevice.log1p(tmp7) tmp9 = tmp4 - tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tmp15 = -tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_log_sigmoid_forward_mean_neg_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BPRLossNew(nn.Module): def __init__(self): nn.Module.__init__(self) self.m = nn.LogSigmoid() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
huoxusg/ScenarioMeta
BPRLoss
false
15,574
[ "MIT" ]
79
ce753da45a3d46ac08961ffc71b2131ae3f7e551
https://github.com/huoxusg/ScenarioMeta/tree/ce753da45a3d46ac08961ffc71b2131ae3f7e551
SirenLayer
import torch
from torch import nn
from math import sqrt


class Sine(nn.Module):
    """Sine activation with scaling.

    Args:
        w0 (float): Omega_0 parameter from SIREN paper.
    """

    def __init__(self, w0=1.0):
        super().__init__()
        self.w0 = w0

    def forward(self, x):
        return torch.sin(self.w0 * x)


class SirenLayer(nn.Module):
    """Implements a single SIREN layer.

    Args:
        dim_in (int): Dimension of input.
        dim_out (int): Dimension of output.
        w0 (float):
        c (float): c value from SIREN paper used for weight initialization.
        is_first (bool): Whether this is first layer of model.
        use_bias (bool):
        activation (torch.nn.Module): Activation function. If None, defaults
            to Sine activation.
    """

    def __init__(self, dim_in, dim_out, w0=30.0, c=6.0, is_first=False,
            use_bias=True, activation=None):
        super().__init__()
        self.dim_in = dim_in
        self.is_first = is_first
        self.linear = nn.Linear(dim_in, dim_out, bias=use_bias)
        w_std = 1 / dim_in if self.is_first else sqrt(c / dim_in) / w0
        nn.init.uniform_(self.linear.weight, -w_std, w_std)
        if use_bias:
            nn.init.uniform_(self.linear.bias, -w_std, w_std)
        self.activation = Sine(w0) if activation is None else activation

    def forward(self, x):
        out = self.linear(x)
        out = self.activation(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 30.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.sin(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class Sine(nn.Module): """Sine activation with scaling. Args: w0 (float): Omega_0 parameter from SIREN paper. """ def __init__(self, w0=1.0): super().__init__() self.w0 = w0 def forward(self, x): return torch.sin(self.w0 * x) class SirenLayerNew(nn.Module): """Implements a single SIREN layer. Args: dim_in (int): Dimension of input. dim_out (int): Dimension of output. w0 (float): c (float): c value from SIREN paper used for weight initialization. is_first (bool): Whether this is first layer of model. use_bias (bool): activation (torch.nn.Module): Activation function. If None, defaults to Sine activation. """ def __init__(self, dim_in, dim_out, w0=30.0, c=6.0, is_first=False, use_bias=True, activation=None): super().__init__() self.dim_in = dim_in self.is_first = is_first self.linear = nn.Linear(dim_in, dim_out, bias=use_bias) w_std = 1 / dim_in if self.is_first else sqrt(c / dim_in) / w0 nn.init.uniform_(self.linear.weight, -w_std, w_std) if use_bias: nn.init.uniform_(self.linear.bias, -w_std, w_std) self.activation = Sine(w0) if activation is None else activation def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
idgmatrix/coin
SirenLayer
false
15,575
[ "MIT" ]
84
2f2df0614ed4fc866d4b7715ee206081e08b9424
https://github.com/idgmatrix/coin/tree/2f2df0614ed4fc866d4b7715ee206081e08b9424
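For this row's get_init_inputs() configuration (dim_in=4, dim_out=4 with the defaults w0=30.0, c=6.0, is_first=False), the initialisation bound evaluates to sqrt(6 / 4) / 30, roughly 0.0408, versus 1 / 4 = 0.25 if the layer were the first one; the 30.0 constant baked into the fused sin kernel is the same w0 applied inside the sine. A short sketch of that arithmetic:

from math import sqrt

dim_in, w0, c = 4, 30.0, 6.0
w_std_hidden = sqrt(c / dim_in) / w0  # non-first layer: sqrt(6/4)/30, about 0.0408
w_std_first = 1 / dim_in              # first layer: 0.25
print(w_std_hidden, w_std_first)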
PEG
import torch
from torch import nn


class Residual(nn.Module):

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) + x


class PEG(nn.Module):

    def __init__(self, dim, kernel_size=3):
        super().__init__()
        self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size,
            padding=kernel_size // 2, groups=dim, stride=1))

    def forward(self, x):
        return self.proj(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEGNew(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, input_0): primals_1 = self.proj.fn.weight primals_2 = self.proj.fn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
idolumbantobing/vit-pytorch
PEG
false
15,576
[ "MIT" ]
9,373
eb70d8dca041cc387b3e1f72d965d8814eeab29a
https://github.com/idolumbantobing/vit-pytorch/tree/eb70d8dca041cc387b3e1f72d965d8814eeab29a
Probability
import torch
import torch.nn as nn


class Probability(nn.Module):
    """A layer that predicts the probabilities """

    def __init__(self, n_primitives, input_channels, make_dense=False):
        super(Probability, self).__init__()
        self._n_primitives = n_primitives
        self._make_dense = make_dense
        if self._make_dense:
            self._fc = nn.Conv3d(input_channels, input_channels, 1)
            self._nonlin = nn.LeakyReLU(0.2, True)
        self._probability_layer = nn.Conv3d(input_channels,
            self._n_primitives, 1)

    def forward(self, X):
        if self._make_dense:
            X = self._nonlin(self._fc(X))
        probs = torch.sigmoid(self._probability_layer(X)).view(-1,
            self._n_primitives)
        return probs


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_primitives': 4, 'input_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp3 * tmp5 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_sigmoid_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 class ProbabilityNew(nn.Module): """A layer that predicts the probabilities """ def __init__(self, n_primitives, input_channels, make_dense=False): super(ProbabilityNew, self).__init__() self._n_primitives = n_primitives self._make_dense = make_dense if self._make_dense: self._fc = nn.Conv3d(input_channels, input_channels, 1) self._nonlin = nn.LeakyReLU(0.2, True) self._probability_layer = nn.Conv3d(input_channels, self. _n_primitives, 1) def forward(self, input_0): primals_1 = self._probability_layer.weight primals_2 = self._probability_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ianhuang0630/CSQ
Probability
false
15,577
[ "MIT" ]
98
5f1fe99a8d9da73692643b3911d675dce269a03d
https://github.com/ianhuang0630/CSQ/tree/5f1fe99a8d9da73692643b3911d675dce269a03d
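The kernel name sigmoid_sigmoid_backward is literal: besides the probabilities, the fused kernel writes sigmoid(x) * (1 - sigmoid(x)) to a second buffer (buf2 in call()), which is the derivative of the sigmoid, presumably saved so the backward pass can reuse it. A small CPU sketch confirming that the saved quantity equals what autograd computes:

import torch

x = torch.randn(8, requires_grad=True)
s = torch.sigmoid(x)
saved = s * (1 - s)                      # what the fused kernel stores alongside the output
grad, = torch.autograd.grad(s.sum(), x)  # d/dx sigmoid(x)
print(torch.allclose(saved, grad, atol=1e-06))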
Mlp
import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed


class Mlp(nn.Module):

    def __init__(self, in_features, hidden_features=None, out_features=None,
            act_layer=nn.GELU, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.act = act_layer()
        self.drop = nn.Dropout(drop)
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1, 1)
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_gelu_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf2 class MlpNew(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.act = act_layer() self.drop = nn.Dropout(drop) self.fc1 = nn.Conv2d(in_features, hidden_features, 1, 1) self.fc2 = nn.Conv2d(hidden_features, out_features, 1, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
iamhankai/ghostnet
Mlp
false
15,578
[ "BSD-3-Clause" ]
220
1262dacffdea62f9983ef0231177aea720e25f12
https://github.com/iamhankai/ghostnet/tree/1262dacffdea62f9983ef0231177aea720e25f12
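The conv+GELU kernel above inlines the exact, erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))); that is also what nn.GELU() computes by default, so the fusion does not change numerics relative to the eager module. A small CPU sketch of the identity, using the same constant that appears in the kernel:

import torch
import torch.nn.functional as F

x = torch.randn(1000)
erf_gelu = 0.5 * x * (1 + torch.erf(x * 0.7071067811865476))
print(torch.allclose(erf_gelu, F.gelu(x), atol=1e-06))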
GatedLinearUnit
import torch
import torch.nn as nn
import torch as th
import torch.nn.functional as F


class GatedLinearUnit(nn.Module):

    def forward(self, x, mask):
        x = th.cat((x, mask), 1)
        return F.glu(x, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = 4 + x1 tmp13 = tmp11 < tmp3 tmp14 = tl.load(in_ptr0 + (x0 + 16 * (4 + x1) + 64 * x2), tmp13 & xmask, other=0.0) tmp15 = tmp11 >= tmp3 tmp17 = tl.load(in_ptr1 + (x0 + 16 * x1 + 64 * x2), tmp15 & xmask, other=0.0) tmp18 = tl.where(tmp13, tmp14, tmp17) tmp19 = tl.sigmoid(tmp18) tmp20 = tmp10 * tmp19 tl.store(out_ptr0 + x3, tmp20, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_glu_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class GatedLinearUnitNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
iamshant/mmt
GatedLinearUnit
false
15,579
[ "Apache-2.0" ]
201
2716e9037f2d59e9aadd92d607bcf753f0146946
https://github.com/iamshant/mmt/tree/2716e9037f2d59e9aadd92d607bcf753f0146946
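F.glu(torch.cat((x, mask), 1), 1) reduces to x * sigmoid(mask), and the fused kernel computes exactly that: for every output element it gathers the matching elements of the two operands and applies the sigmoid gate without ever materialising the concatenation. A small CPU sketch of the identity on the get_inputs() shapes:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
mask = torch.rand(4, 4, 4, 4)
eager = F.glu(torch.cat((x, mask), 1), 1)  # concatenate along channels, then gate
fused_like = x * torch.sigmoid(mask)       # per-element form used by the kernel
print(torch.allclose(eager, fused_like, atol=1e-06))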
ReduceDim
import torch
import torch.nn as nn
import torch.nn.functional as F


class ReduceDim(nn.Module):

    def __init__(self, input_dimension, output_dimension):
        super(ReduceDim, self).__init__()
        self.fc = nn.Linear(input_dimension, output_dimension)

    def forward(self, x):
        x = self.fc(x)
        x = F.normalize(x, dim=-1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dimension': 4, 'output_dimension': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class ReduceDimNew(nn.Module): def __init__(self, input_dimension, output_dimension): super(ReduceDimNew, self).__init__() self.fc = nn.Linear(input_dimension, output_dimension) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
iamshant/mmt
ReduceDim
false
15,580
[ "Apache-2.0" ]
201
2716e9037f2d59e9aadd92d607bcf753f0146946
https://github.com/iamshant/mmt/tree/2716e9037f2d59e9aadd92d607bcf753f0146946
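The division kernel above reproduces F.normalize for the last dimension: the L2 norm of each row of 4 values is clamped from below at 1e-12, the same default eps that F.normalize uses, and then divides every element. A small CPU sketch of the equivalence:

import torch
import torch.nn.functional as F

x = torch.randn(64, 4)
manual = x / x.norm(dim=-1, keepdim=True).clamp(min=1e-12)
print(torch.allclose(manual, F.normalize(x, dim=-1), atol=1e-06))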
L2Norm
import torch
from torch import nn


class L2Norm(nn.Module):

    def forward(self, x, eps=1e-06):
        norm = x.norm(dim=1, keepdim=True).clamp(min=eps)
        return x / norm


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class L2NormNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
idolumbantobing/vit-pytorch
L2Norm
false
15,581
[ "MIT" ]
9,373
eb70d8dca041cc387b3e1f72d965d8814eeab29a
https://github.com/idolumbantobing/vit-pytorch/tree/eb70d8dca041cc387b3e1f72d965d8814eeab29a
BilinearWithBias
from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn.modules import Module


class BilinearWithBias(Module):

    def __init__(self, in1_features, in2_features, out_features):
        super(BilinearWithBias, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.W = Parameter(torch.Tensor(out_features, in1_features,
            in2_features))
        self.V1 = Parameter(torch.Tensor(out_features, in1_features))
        self.V2 = Parameter(torch.Tensor(out_features, in2_features))
        self.bias = Parameter(torch.Tensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.W.size(1))
        self.W.data.uniform_(-stdv, stdv)
        self.V1.data.uniform_(-stdv, stdv)
        self.V2.data.uniform_(-stdv, stdv)
        self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input1, input2):
        result = F.bilinear(input1, input2, self.W, self.bias)
        result += F.linear(input1, self.V1, None)
        result += F.linear(input2, self.V2, None)
        return result

    def extra_repr(self):
        return ('in1_features={}, in2_features={}, out_features={}, bias={}'
            .format(self.in1_features, self.in2_features,
            self.out_features, self.bias is not None))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in1_features': 4, 'in2_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import math from torch.nn.parameter import Parameter from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_4, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf4, primals_2, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_2 return buf4, primals_3, primals_4 class BilinearWithBiasNew(Module): def __init__(self, in1_features, in2_features, out_features): super(BilinearWithBiasNew, self).__init__() self.in1_features = in1_features self.in2_features = in2_features self.out_features = out_features self.W = Parameter(torch.Tensor(out_features, in1_features, in2_features)) self.V1 = Parameter(torch.Tensor(out_features, in1_features)) self.V2 = Parameter(torch.Tensor(out_features, in2_features)) self.bias = Parameter(torch.Tensor(out_features)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.W.size(1)) self.W.data.uniform_(-stdv, stdv) self.V1.data.uniform_(-stdv, stdv) self.V2.data.uniform_(-stdv, stdv) self.bias.data.uniform_(-stdv, stdv) def extra_repr(self): return ('in1_features={}, in2_features={}, out_features={}, bias={}' .format(self.in1_features, self.in2_features, self.out_features, self.bias is not None)) def forward(self, input_0, input_1): primals_1 = self.W primals_5 = self.V1 primals_6 = self.V2 primals_2 = self.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6]) return output[0]
ianyfan/depccg
BilinearWithBias
false
15,582
[ "MIT" ]
75
dda01a72ad09ee36fb5d626a473cc2a0d267c57b
https://github.com/ianyfan/depccg/tree/dda01a72ad09ee36fb5d626a473cc2a0d267c57b
Refine
import torch
import torch.nn
import torch.nn.functional as F
import torch.utils.data.dataset


class ResBlock(torch.nn.Module):

    def __init__(self, indim, outdim=None, stride=1):
        super(ResBlock, self).__init__()
        if outdim is None:
            outdim = indim
        if indim == outdim and stride == 1:
            self.downsample = None
        else:
            self.downsample = torch.nn.Conv2d(indim, outdim, kernel_size=3,
                padding=1, stride=stride)
        self.conv1 = torch.nn.Conv2d(indim, outdim, kernel_size=3,
            padding=1, stride=stride)
        self.conv2 = torch.nn.Conv2d(outdim, outdim, kernel_size=3, padding=1)

    def forward(self, x):
        r = self.conv1(F.relu(x))
        r = self.conv2(F.relu(r))
        if self.downsample is not None:
            x = self.downsample(x)
        return x + r


class Refine(torch.nn.Module):

    def __init__(self, inplanes, planes, scale_factor=2):
        super(Refine, self).__init__()
        self.convFS = torch.nn.Conv2d(inplanes, planes, kernel_size=3,
            padding=1, stride=1)
        self.ResFS = ResBlock(planes, planes)
        self.ResMM = ResBlock(planes, planes)
        self.scale_factor = scale_factor

    def forward(self, f, pm):
        s = self.ResFS(self.convFS(f))
        m = s + F.interpolate(pm, scale_factor=self.scale_factor, mode=
            'bilinear', align_corners=False)
        m = self.ResMM(m)
        return m


def get_inputs():
    return [torch.rand([4, 4, 16, 16]), torch.rand([4, 4, 8, 8])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn import torch.nn.functional as F import torch.utils.data.dataset assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_convolution_mul_relu_sub_2( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x6 = xindex x4 = xindex // 256 % 4 tmp44 = tl.load(in_out_ptr1 + x6, None) tmp45 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr2 + x6, None) tmp48 = tl.load(in_ptr3 + x4, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 7, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = x0 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 + tmp2 tmp16 = tmp15 * tmp2 tmp17 = tmp16 - tmp2 tmp18 = triton_helpers.maximum(tmp17, tmp6) tmp19 = tmp18.to(tl.int32) tmp20 = tmp19 + tmp9 tmp21 = triton_helpers.minimum(tmp20, tmp11) tmp22 = tl.load(in_ptr0 + (tmp21 + 8 * tmp12 + 64 * x2), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (tmp19 + 8 * tmp12 + 64 * x2), None, eviction_policy='evict_last') tmp24 = tmp22 - tmp23 tmp25 = tmp19.to(tl.float32) tmp26 = tmp18 - tmp25 tmp27 = triton_helpers.maximum(tmp26, tmp6) tmp28 = 1.0 tmp29 = triton_helpers.minimum(tmp27, tmp28) tmp30 = tmp24 * tmp29 tmp31 = tmp23 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp19 + 8 * tmp8 + 64 * x2), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp21 + 8 * tmp8 + 64 * x2), None, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp8.to(tl.float32) tmp39 = tmp7 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp6) tmp41 = triton_helpers.minimum(tmp40, tmp28) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tmp46 = tmp44 + tmp45 tmp49 = tmp47 + tmp48 tmp50 = tmp46 + 
tmp49 tmp51 = tmp50 + tmp43 tmp52 = tl.full([1], 0, tl.int32) tmp53 = triton_helpers.maximum(tmp52, tmp51) tl.store(in_out_ptr1 + x6, tmp51, None) tl.store(out_ptr0 + x6, tmp53, None) @triton.jit def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16, 16), (1024, 256, 16, 1)) buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(4096)](buf0, primals_2, buf1, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 16, 16), (1024, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(4096)](buf3, primals_5, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1)) buf8 = buf0 del buf0 buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_convolution_mul_relu_sub_2[ grid(4096)](buf8, primals_8, primals_2, buf4, primals_7, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_2 del primals_7 del primals_8 buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 16, 16), (1024, 256, 16, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_1[grid(4096)](buf11, primals_10, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_10 buf12 = extern_kernels.convolution(buf11, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 16, 16), (1024, 256, 16, 1)) buf13 = buf12 del buf12 
triton_poi_fused_add_convolution_3[grid(4096)](buf13, buf8, primals_12, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del primals_12 return (buf13, primals_1, primals_3, primals_4, primals_6, primals_9, primals_11, buf1, buf3, buf9, buf11) class ResBlock(torch.nn.Module): def __init__(self, indim, outdim=None, stride=1): super(ResBlock, self).__init__() if outdim is None: outdim = indim if indim == outdim and stride == 1: self.downsample = None else: self.downsample = torch.nn.Conv2d(indim, outdim, kernel_size=3, padding=1, stride=stride) self.conv1 = torch.nn.Conv2d(indim, outdim, kernel_size=3, padding= 1, stride=stride) self.conv2 = torch.nn.Conv2d(outdim, outdim, kernel_size=3, padding=1) def forward(self, x): r = self.conv1(F.relu(x)) r = self.conv2(F.relu(r)) if self.downsample is not None: x = self.downsample(x) return x + r class RefineNew(torch.nn.Module): def __init__(self, inplanes, planes, scale_factor=2): super(RefineNew, self).__init__() self.convFS = torch.nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=1) self.ResFS = ResBlock(planes, planes) self.ResMM = ResBlock(planes, planes) self.scale_factor = scale_factor def forward(self, input_0, input_1): primals_1 = self.convFS.weight primals_2 = self.convFS.bias primals_4 = self.ResFS.conv1.weight primals_5 = self.ResFS.conv1.bias primals_6 = self.ResFS.conv2.weight primals_7 = self.ResFS.conv2.bias primals_9 = self.ResMM.conv1.weight primals_10 = self.ResMM.conv1.bias primals_11 = self.ResMM.conv2.weight primals_12 = self.ResMM.conv2.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
hzxie/RMNet
Refine
false
15,583
[ "MIT" ]
66
32a16f9c9473463a41dd6e95f72b06dd830fc1eb
https://github.com/hzxie/RMNet/tree/32a16f9c9473463a41dd6e95f72b06dd830fc1eb
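For orientation, the generated RefineNew above routes the module's convolution weights plus the two feature maps through call(), which fuses the convolutions, the residual blocks, and the bilinear 2x upsample of the second input. A minimal usage sketch, assuming the class definitions above are in scope, a CUDA device is available, and using the tensor shapes checked by call() (the variable names here are illustrative only):

import torch

refine = RefineNew(inplanes=4, planes=4).cuda()
x1 = torch.rand(4, 4, 16, 16, device='cuda')   # matches assert_size_stride(primals_3, ...)
x2 = torch.rand(4, 4, 8, 8, device='cuda')     # matches assert_size_stride(primals_8, ...)
out = refine(x1, x2)
print(out.shape)   # torch.Size([4, 4, 16, 16])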
Bilinear
import torch
import torch.nn as nn


class Bilinear(nn.Module):

    def __init__(self, size):
        super(Bilinear, self).__init__()
        self.size = size
        self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
        self.reset_parameters()

    def reset_parameters(self):
        params = [p for p in self.parameters() if p.requires_grad]
        for i, param in enumerate(params):
            param.data.normal_()

    def forward(self, vector1, vector2):
        bma = torch.matmul(vector1, self.mat).unsqueeze(1)
        ba = torch.matmul(bma, vector2.unsqueeze(2)).view(-1, 1)
        return ba


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x2 = xindex // 256
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_clone_1[grid(1024)](primals_3, buf2, 1024, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3)
        del buf1
    return reinterpret_tensor(buf3, (1024, 1), (1, 1), 0), reinterpret_tensor(
        buf2, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (4,
        64), (1, 4), 0)


class BilinearNew(nn.Module):

    def __init__(self, size):
        super(BilinearNew, self).__init__()
        self.size = size
        self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
        self.reset_parameters()

    def reset_parameters(self):
        params = [p for p in self.parameters() if p.requires_grad]
        for i, param in enumerate(params):
            param.data.normal_()

    def forward(self, input_0, input_1):
        primals_1 = self.mat
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
iesl/diora-public
Bilinear
false
15,584
[ "Apache-2.0" ]
81
110b9b0881907ec049dd60cd93ff6ef084582b3b
https://github.com/iesl/diora-public/tree/110b9b0881907ec049dd60cd93ff6ef084582b3b
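The original Bilinear and the generated BilinearNew are meant to be interchangeable on the shapes produced by get_inputs(); a minimal sketch of checking that, assuming both class definitions above are in scope and a CUDA device is available (the generated call() allocates CUDA buffers):

import torch

ref = Bilinear(4).cuda()             # size taken from get_init_inputs()
opt = BilinearNew(4).cuda()
opt.mat.data.copy_(ref.mat.data)     # share the randomly initialised weight

v1 = torch.rand(4, 4, 4, 4, device='cuda')   # shapes from get_inputs()
v2 = torch.rand(4, 4, 4, 4, device='cuda')

# Both paths should produce the same [1024, 1] bilinear scores v1 @ mat @ v2.
print(torch.allclose(ref(v1, v2), opt(v1, v2), atol=1e-5))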
Sine
import torch
from torch import nn


class Sine(nn.Module):
    """Sine activation with scaling.

    Args:
        w0 (float): Omega_0 parameter from SIREN paper.
    """

    def __init__(self, w0=1.0):
        super().__init__()
        self.w0 = w0

    def forward(self, x):
        return torch.sin(self.w0 * x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl_math.sin(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SineNew(nn.Module):
    """Sine activation with scaling.

    Args:
        w0 (float): Omega_0 parameter from SIREN paper.
    """

    def __init__(self, w0=1.0):
        super().__init__()
        self.w0 = w0

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
idgmatrix/coin
Sine
false
15,585
[ "MIT" ]
84
2f2df0614ed4fc866d4b7715ee206081e08b9424
https://github.com/idgmatrix/coin/tree/2f2df0614ed4fc866d4b7715ee206081e08b9424
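The Sine pair is the simplest entry here: the single fused kernel evaluates sin(w0 * x) elementwise, with w0 baked in at its default of 1.0. A minimal sketch exercising both versions, assuming the definitions above are in scope and a CUDA device is available:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')   # shape from get_inputs()
ref = Sine()       # eager: torch.sin(self.w0 * x)
opt = SineNew()    # Triton: triton_poi_fused_mul_sin_0
print(torch.allclose(ref(x), opt(x), atol=1e-6))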
ArcFaceLoss
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class DenseCrossEntropy(nn.Module):

    def forward(self, x, target):
        x = x.float()
        target = target.float()
        logprobs = torch.nn.functional.log_softmax(x, dim=-1)
        loss = -logprobs * target
        loss = loss.sum(-1)
        return loss.mean()


class ArcFaceLoss(nn.Module):

    def __init__(self, s=30.0, m=0.5):
        super().__init__()
        self.crit = DenseCrossEntropy()
        self.s = s
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m

    def forward(self, logits, labels):
        labels = F.one_hot(labels.long(), logits.size(1)).float()
        logits = logits.float()
        cosine = logits
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
        phi = cosine * self.cos_m - sine * self.sin_m
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        output = labels * phi + (1.0 - labels) * cosine
        output *= self.s
        loss = self.crit(output, labels)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_arange_eq_gt_mul_pow_rsub_sqrt_sub_where_0( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp70 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([1], 0, tl.int64) tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = -0.8775825618903726 tmp8 = tmp6 > tmp7 tmp9 = 0.8775825618903728 tmp10 = tmp6 * tmp9 tmp11 = tmp6 * tmp6 tmp12 = 1.0 tmp13 = tmp12 - tmp11 tmp14 = libdevice.sqrt(tmp13) tmp15 = 0.479425538604203 tmp16 = tmp14 * tmp15 tmp17 = tmp10 - tmp16 tmp18 = 0.23971276930210156 tmp19 = tmp6 - tmp18 tmp20 = tl.where(tmp8, tmp17, tmp19) tmp21 = tmp5 * tmp20 tmp22 = tmp12 - tmp5 tmp23 = tmp22 * tmp6 tmp24 = tmp21 + tmp23 tmp25 = tmp24 * tmp12 tmp26 = tl.full([1], 1, tl.int64) tmp27 = tmp1 == tmp26 tmp28 = tmp27.to(tl.int64) tmp29 = tmp28.to(tl.float32) tmp31 = tmp30 > tmp7 tmp32 = tmp30 * tmp9 tmp33 = tmp30 * tmp30 tmp34 = tmp12 - tmp33 tmp35 = libdevice.sqrt(tmp34) tmp36 = tmp35 * tmp15 tmp37 = tmp32 - tmp36 tmp38 = tmp30 - tmp18 tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tmp29 * tmp39 tmp41 = tmp12 - tmp29 tmp42 = tmp41 * tmp30 tmp43 = tmp40 + tmp42 tmp44 = tmp43 * tmp12 tmp45 = triton_helpers.maximum(tmp25, tmp44) tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp1 == tmp46 tmp48 = tmp47.to(tl.int64) tmp49 = tmp48.to(tl.float32) tmp51 = tmp50 > tmp7 tmp52 = tmp50 * tmp9 tmp53 = tmp50 * tmp50 tmp54 = tmp12 - tmp53 tmp55 = libdevice.sqrt(tmp54) tmp56 = tmp55 * tmp15 tmp57 = tmp52 - tmp56 tmp58 = tmp50 - tmp18 tmp59 = tl.where(tmp51, tmp57, tmp58) tmp60 = tmp49 * tmp59 tmp61 = tmp12 - tmp49 tmp62 = tmp61 * tmp50 tmp63 = tmp60 + tmp62 tmp64 = tmp63 * tmp12 tmp65 = triton_helpers.maximum(tmp45, tmp64) tmp66 = tl.full([1], 3, tl.int64) tmp67 = tmp1 == tmp66 tmp68 = tmp67.to(tl.int64) tmp69 = tmp68.to(tl.float32) tmp71 = tmp70 > tmp7 tmp72 = tmp70 * tmp9 tmp73 = tmp70 * tmp70 tmp74 = tmp12 - tmp73 tmp75 = libdevice.sqrt(tmp74) tmp76 = tmp75 * tmp15 tmp77 = tmp72 - tmp76 tmp78 = tmp70 - tmp18 tmp79 = tl.where(tmp71, tmp77, tmp78) tmp80 = tmp69 * tmp79 tmp81 = tmp12 - tmp69 tmp82 = tmp81 * tmp70 tmp83 = tmp80 + tmp82 tmp84 = tmp83 * tmp12 tmp85 = triton_helpers.maximum(tmp65, tmp84) tl.store(out_ptr0 + x2, tmp85, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_eq_gt_mul_pow_rsub_sqrt_sub_where_1( in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x4 = xindex % 256 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = x0 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = -0.8775825618903726 tmp8 = tmp6 > tmp7 tmp9 = 0.8775825618903728 tmp10 = tmp6 * tmp9 tmp11 = tmp6 * tmp6 tmp12 = 1.0 tmp13 = tmp12 - tmp11 tmp14 = libdevice.sqrt(tmp13) tmp15 = 0.479425538604203 tmp16 = tmp14 * tmp15 tmp17 = tmp10 - tmp16 tmp18 = 0.23971276930210156 tmp19 = tmp6 - tmp18 tmp20 = tl.where(tmp8, tmp17, tmp19) tmp21 = tmp5 * tmp20 tmp22 = tmp12 - tmp5 tmp23 = tmp22 * tmp6 tmp24 = tmp21 + tmp23 tmp25 = tmp24 * tmp12 tmp27 = tmp25 - tmp26 tmp28 = 30.0 tmp29 = tmp27 * tmp28 tl.store(out_ptr0 + x5, tmp29, xmask) @triton.jit def triton_red_fused__log_softmax__to_copy_arange_eq_mean_mul_neg_sum_2( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp46 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), rmask, eviction_policy= 'evict_last', other=0.0) tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), rmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), rmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + r0, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp14.to(tl.int64) tmp16 = tl.full([1, 1], 0, tl.int64) tmp17 = tmp15 == tmp16 tmp18 = tmp17.to(tl.int64) tmp19 = tmp18.to(tl.float32) tmp20 = tmp13 * tmp19 tmp21 = tmp2 - tmp11 tmp22 = -tmp21 tmp23 = tl.full([1, 1], 1, tl.int64) tmp24 = tmp15 == tmp23 tmp25 = tmp24.to(tl.int64) tmp26 = tmp25.to(tl.float32) tmp27 = tmp22 * tmp26 tmp28 = tmp20 + tmp27 tmp29 = tmp5 - tmp11 tmp30 = -tmp29 tmp31 = tl.full([1, 1], 2, tl.int64) tmp32 = tmp15 == tmp31 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33.to(tl.float32) tmp35 = tmp30 * tmp34 tmp36 = tmp28 + tmp35 tmp37 = tmp8 - tmp11 tmp38 = -tmp37 tmp39 = tl.full([1, 1], 3, tl.int64) tmp40 = tmp15 == tmp39 tmp41 = tmp40.to(tl.int64) tmp42 = tmp41.to(tl.float32) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = _tmp46 + tmp45 _tmp46 = tl.where(rmask, tmp47, _tmp46) tmp46 = tl.sum(_tmp46, 1)[:, None] tmp48 = 256.0 tmp49 = tmp46 / tmp48 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp49, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_add_arange_eq_gt_mul_pow_rsub_sqrt_sub_where_0[ grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) 
triton_poi_fused__to_copy_add_arange_eq_gt_mul_pow_rsub_sqrt_sub_where_1[ grid(1024)](arg0_1, arg1_1, buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 del buf0 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_red_fused__log_softmax__to_copy_arange_eq_mean_mul_neg_sum_2[ grid(1)](buf4, buf1, arg0_1, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del arg0_1 del buf1 return buf4, class DenseCrossEntropy(nn.Module): def forward(self, x, target): x = x.float() target = target.float() logprobs = torch.nn.functional.log_softmax(x, dim=-1) loss = -logprobs * target loss = loss.sum(-1) return loss.mean() class ArcFaceLossNew(nn.Module): def __init__(self, s=30.0, m=0.5): super().__init__() self.crit = DenseCrossEntropy() self.s = s self.cos_m = math.cos(m) self.sin_m = math.sin(m) self.th = math.cos(math.pi - m) self.mm = math.sin(math.pi - m) * m def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
i-pan/kaggle-melanoma
ArcFaceLoss
false
15,586
[ "MIT" ]
68
caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
https://github.com/i-pan/kaggle-melanoma/tree/caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
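The float constants baked into the fused ArcFace kernels are just the precomputed margin terms for the default m = 0.5: cos(m), sin(m), cos(pi - m) and sin(pi - m) * m, so the kernel applies the usual additive angular margin cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m), falling back to cosine - mm below the threshold. A short sketch recomputing them with only the standard library:

import math

m = 0.5
print(math.cos(m))                # 0.8775825618903728  -> cos_m (tmp9 in the kernel)
print(math.sin(m))                # 0.479425538604203   -> sin_m (tmp15)
print(math.cos(math.pi - m))      # -0.8775825618903726 -> th, the threshold (tmp7)
print(math.sin(math.pi - m) * m)  # 0.23971276930210156 -> mm, the fallback offset (tmp18)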
A2CCritic
import torch
import torch as t
import torch.nn as nn


class A2CCritic(nn.Module):

    def __init__(self, state_dim):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, 16)
        self.fc2 = nn.Linear(16, 16)
        self.fc3 = nn.Linear(16, 1)

    def forward(self, state):
        v = t.relu(self.fc1(state))
        v = t.relu(self.fc2(v))
        v = self.fc3(v)
        return v


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (16, 16), (16, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (1, 16), (16, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
            primals_2, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0),
            reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3,
            primals_5, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16),
            (16, 1), 0), reinterpret_tensor(primals_6, (16, 1), (1, 16), 0),
            alpha=1, beta=1, out=buf5)
        del primals_7
    return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(
        buf3, (64, 16), (16, 1), 0), primals_6, buf6, primals_4, buf7


class A2CCriticNew(nn.Module):

    def __init__(self, state_dim):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, 16)
        self.fc2 = nn.Linear(16, 16)
        self.fc3 = nn.Linear(16, 1)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
iffiX/machin
A2CCritic
false
15,587
[ "MIT" ]
287
7fa986b1bafdefff117d6ff73d14644a5488de9d
https://github.com/iffiX/machin/tree/7fa986b1bafdefff117d6ff73d14644a5488de9d
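As with the other machin entries, A2CCriticNew feeds the layer weights plus the observation through call() and returns the value head output. A minimal usage sketch, assuming the definitions above are in scope and a CUDA device is available:

import torch

critic = A2CCriticNew(state_dim=4).cuda()       # init args from get_init_inputs()
state = torch.rand(4, 4, 4, 4, device='cuda')   # shape from get_inputs()
value = critic(state)
print(value.shape)   # torch.Size([4, 4, 4, 1]) -- one value per trailing state vector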
FCDiscriminator_Local
import torch
import torch.nn as nn


class FCDiscriminator_Local(nn.Module):

    def __init__(self, num_classes, ndf=64):
        super(FCDiscriminator_Local, self).__init__()
        self.conv1 = nn.Conv2d(num_classes + 2048, ndf, kernel_size=4,
            stride=2, padding=1)
        self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2,
            padding=1)
        self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2,
            padding=1)
        self.classifier = nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=2,
            padding=1)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear')

    def forward(self, x):
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.classifier(x)
        x = self.up_sample(x)
        return x


def get_inputs():
    return [torch.rand([4, 2052, 64, 64])]


def get_init_inputs():
    return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused__to_copy_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.03125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_4(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.03125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.03125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) 
tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 128 % 128 x0 = xindex % 128 x2 = xindex // 16384 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + 0) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tmp12 = tmp9 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tmp13 < 0 tmp16 = tl.where(tmp15, tmp14, tmp13) tmp17 = tl.load(in_ptr2 + (tmp16 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tmp18 = tmp17 + tmp11 tmp19 = tmp18 - tmp12 tmp21 = tmp19 * tmp20 tmp22 = tmp12 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tmp27 = tl.load(in_ptr2 + (tmp8 + 4 * tmp26 + 16 * x2), None, eviction_policy='evict_last') tmp28 = tmp27 + tmp11 tmp29 = tl.load(in_ptr2 + (tmp16 + 4 * tmp26 + 16 * x2), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp11 tmp31 = tmp30 - tmp28 tmp32 = tmp31 * tmp20 tmp33 = tmp28 + tmp32 tmp34 = tmp33 - tmp22 tmp36 = tmp34 * tmp35 tmp37 = tmp22 + tmp36 tl.store(in_out_ptr0 + x3, tmp37, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 2052, 4, 4), (32832, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 2052, 64, 64), (8404992, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (1, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, 
primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(65536)](buf5, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1)) buf7 = empty_strided_cuda((128, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_3[grid(128)](buf7, 128, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((128, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_4[grid(128)](buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((128,), (1,), torch.int64) triton_poi_fused__to_copy_3[grid(128)](buf9, 128, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((128,), (1,), torch.int64) triton_poi_fused_add_clamp_4[grid(128)](buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(128)](buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((128, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(128)](buf13, 128, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 1, 128, 128), (16384, 65536, 128, 1), torch.float32) buf15 = reinterpret_tensor(buf14, (4, 1, 128, 128), (16384, 16384, 128, 1), 0) del buf14 triton_poi_fused__unsafe_index_add_convolution_mul_sub_6[grid(65536)]( buf15, buf7, buf9, buf6, primals_9, buf10, buf11, buf8, buf13, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_9 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7, buf8, buf9, buf10, buf11, buf13) class FCDiscriminator_LocalNew(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator_LocalNew, self).__init__() self.conv1 = nn.Conv2d(num_classes + 2048, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear') def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.classifier.weight primals_9 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
gabriel-tjio/ASH
FCDiscriminator_Local
false
15,588
[ "MIT" ]
300
40ae044a7ca1809f91ba89671d223a96eda327da
https://github.com/gabriel-tjio/ASH/tree/40ae044a7ca1809f91ba89671d223a96eda327da
A2CActorDisc
import torch
from torch.distributions import Categorical
import torch as t
import torch.nn as nn


class A2CActorDisc(nn.Module):

    def __init__(self, state_dim, action_num):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, 16)
        self.fc2 = nn.Linear(16, 16)
        self.fc3 = nn.Linear(16, action_num)

    def forward(self, state, action=None):
        a = t.relu(self.fc1(state))
        a = t.relu(self.fc2(a))
        probs = t.softmax(self.fc3(a), dim=1)
        dist = Categorical(probs=probs)
        act = action if action is not None else dist.sample()
        act_entropy = dist.entropy()
        act_log_prob = dist.log_prob(act.flatten())
        return act, act_log_prob, act_entropy


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, 
num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf6 buf8 = torch.ops.aten.multinomial.default(buf7, 1, True) buf9 = buf8 del buf8 return reinterpret_tensor(buf9, (4,), (1,), 0 ), buf7, primals_3, buf1, buf3, buf4, primals_6, primals_4 class A2CActorDiscNew(nn.Module): def __init__(self, state_dim, action_num): super().__init__() self.fc1 = nn.Linear(state_dim, 16) self.fc2 = nn.Linear(16, 16) self.fc3 = nn.Linear(16, action_num) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1], output[2]
iffiX/machin
A2CActorDisc
false
15,589
[ "MIT" ]
287
7fa986b1bafdefff117d6ff73d14644a5488de9d
https://github.com/iffiX/machin/tree/7fa986b1bafdefff117d6ff73d14644a5488de9d
LanguageModelCriterion
import torch
import torch.nn as nn
from torch.autograd import *


def to_contiguous(tensor):
    if tensor.is_contiguous():
        return tensor
    else:
        return tensor.contiguous()


class LanguageModelCriterion(nn.Module):

    def __init__(self):
        super(LanguageModelCriterion, self).__init__()

    def forward(self, input, target, mask):
        target = target[:, :input.size(1)]
        mask = mask[:, :input.size(1)]
        input = to_contiguous(input).view(-1, input.size(2))
        target = to_contiguous(target).view(-1, 1)
        mask = to_contiguous(mask).view(-1, 1)
        output = -input.gather(1, target) * mask
        output = torch.sum(output) / torch.sum(mask)
        return output


def get_inputs():
    return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
        dtype=torch.int64), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr2 + r0, None)
    tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4),
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
        'evict_last')
    tmp7 = -tmp6
    tmp8 = tmp7.to(tl.float32)
    tmp10 = tmp8 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp16 = tl.sum(tmp14, 1)[:, None]
    tmp17 = tmp13 / tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_div_gather_mul_neg_sum_0[grid(1)](buf2, arg1_1,
            arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf2,


def to_contiguous(tensor):
    if tensor.is_contiguous():
        return tensor
    else:
        return tensor.contiguous()


class LanguageModelCriterionNew(nn.Module):

    def __init__(self):
        super(LanguageModelCriterionNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
ifty1987/PORL
LanguageModelCriterion
false
15,590
[ "MIT" ]
61
979d5462b5c74bcca8013d9c54d86b676d3e2d43
https://github.com/ifty1987/PORL/tree/979d5462b5c74bcca8013d9c54d86b676d3e2d43
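The LanguageModelCriterion pair collapses the gather, masking and both reductions into a single fused kernel that returns the masked mean of the negated gathered scores. A minimal sketch of driving the generated version with the shapes from get_inputs(), assuming the definitions above are in scope and a CUDA device is available:

import torch

crit = LanguageModelCriterionNew()
inp = torch.ones(4, 4, 4, dtype=torch.int64, device='cuda')    # per get_inputs()
target = torch.ones(4, 4, dtype=torch.int64, device='cuda')
mask = torch.rand(4, 4, device='cuda')
loss = crit(inp, target, mask)   # scalar tensor: sum(-inp.gather(...) * mask) / sum(mask)
print(loss.item())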