Dataset schema (column, type, and observed value range):

| column | type | min | max |
| --- | --- | --- | --- |
| entry_point | string (length) | 1 | 65 |
| original_triton_python_code | string (length) | 208 | 619k |
| optimised_triton_code | string (length) | 1.15k | 275k |
| repo_name | string (length) | 7 | 115 |
| module_name | string (length) | 1 | 65 |
| synthetic | bool (1 class) | - | - |
| uuid | int64 | 0 | 18.5k |
| licenses | list (length) | 1 | 6 |
| stars | int64 | 0 | 19.8k |
| sha | string (length) | 40 | 40 |
| repo_link | string (length) | 72 | 180 |
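If this dump mirrors a dataset hosted on the Hugging Face Hub, the rows can be pulled with the `datasets` library. A minimal loading sketch; the dataset path is a hypothetical placeholder:

```python
# Minimal loading sketch. Assumes the dump is hosted as a Hugging Face
# dataset; "user/dataset-name" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["sha"])
# The two code columns hold the original PyTorch module and the
# TorchInductor-generated Triton code as plain strings.
print(row["original_triton_python_code"][:200])
```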
---

entry_point: EqualLinear

original_triton_python_code:

```python
import math
import torch
import torch.nn.functional as F
from torch import nn

class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None):
        """
        :param in_dim:
        :param out_dim:
        :param bias:
        :param bias_init:
        :param lr_mul: 0.01
        :param activation: None: Linear; fused_leaky_relu
        """
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        if self.activation is not None:
            self.act_layer = nn.LeakyReLU(0.2)
        self.scale = 1 / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
        if self.activation:
            out = self.act_layer(out)
        return out

    def __repr__(self):
        return f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}), activation={self.activation}'

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'in_dim': 4, 'out_dim': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)

@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf0
        del buf1
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)

class EqualLinearNew(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None):
        """
        :param in_dim:
        :param out_dim:
        :param bias:
        :param bias_init:
        :param lr_mul: 0.01
        :param activation: None: Linear; fused_leaky_relu
        """
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        if self.activation is not None:
            self.act_layer = nn.LeakyReLU(0.2)
        self.scale = 1 / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def __repr__(self):
        return f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}), activation={self.activation}'

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```

repo_name: PeterouZh/CIPS-3D
module_name: EqualLinear
synthetic: false
uuid: 14175
licenses: ["MIT"]
stars: 308
sha: 9b8bfa0fb23f642af042e150ccd70408f9d137c6
repo_link: https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
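Each record carries `get_inputs()` and `get_init_inputs()` helpers, which suggests a direct way to check that the optimised module reproduces the original. A minimal sketch, assuming both classes from the record above are in scope and a CUDA device is available:

```python
import torch

# Build both variants from the record's init helpers and share parameters.
args, kwargs = get_init_inputs()
ref = EqualLinear(*args, **kwargs).cuda()
opt = EqualLinearNew(*args, **kwargs).cuda()
opt.load_state_dict(ref.state_dict())

# Compare outputs on the record's sample inputs.
inputs = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    torch.testing.assert_close(ref(*inputs), opt(*inputs), rtol=1e-4, atol=1e-4)
```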
---

entry_point: EmbeddingLayer

original_triton_python_code:

```python
import torch
import torch.nn as nn

class EmbeddingLayer(nn.Module):

    def __init__(self, in_channel, out_channel, img_size, patch_size):
        super(EmbeddingLayer, self).__init__()
        self.embedding1 = nn.Conv2d(in_channel, out_channel, kernel_size=patch_size, stride=patch_size, padding=0)

    def forward(self, x):
        out = self.embedding1(x).flatten(2).transpose(1, 2)
        return out

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'img_size': 4, 'patch_size': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), primals_1, primals_3

class EmbeddingLayerNew(nn.Module):

    def __init__(self, in_channel, out_channel, img_size, patch_size):
        super(EmbeddingLayerNew, self).__init__()
        self.embedding1 = nn.Conv2d(in_channel, out_channel, kernel_size=patch_size, stride=patch_size, padding=0)

    def forward(self, input_0):
        primals_1 = self.embedding1.weight
        primals_2 = self.embedding1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```

repo_name: Phoenix1153/ViT_OOD_generalization
module_name: EmbeddingLayer
synthetic: false
uuid: 14176
licenses: ["MIT"]
stars: 51
sha: 7c5b542e5f5279032c9cd20667cc9e09a86b653d
repo_link: https://github.com/Phoenix1153/ViT_OOD_generalization/tree/7c5b542e5f5279032c9cd20667cc9e09a86b653d
---

entry_point: GroupScaling1D

original_triton_python_code:

```python
import torch
from torch import nn

class GroupScaling1D(nn.Module):
    """Scales inputs by the second moment for the entire layer."""

    def __init__(self, eps=1e-05, group_num=4):
        super(GroupScaling1D, self).__init__()
        self.eps = eps
        self.group_num = group_num

    def extra_repr(self):
        return f'eps={self.eps}, group={self.group_num}'

    def forward(self, input):
        T, B, C = input.shape[0], input.shape[1], input.shape[2]
        Cg = C // self.group_num
        gn_input = input.contiguous().reshape(T, B, self.group_num, Cg)
        moment2 = torch.repeat_interleave(torch.mean(gn_input * gn_input, dim=3, keepdim=True), repeats=Cg, dim=-1).contiguous().reshape(T, B, C)
        return input / torch.sqrt(moment2 + self.eps)

def get_inputs():
    return [torch.rand([4, 4, 4, 1])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_add_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 4
    x4 = xindex % 64
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp3 = 1.0
    tmp4 = tmp2 / tmp3
    tmp5 = 1e-05
    tmp6 = tmp4 + tmp5
    tmp7 = libdevice.sqrt(tmp6)
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x5, tmp8, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 1), (16, 4, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,

class GroupScaling1DNew(nn.Module):
    """Scales inputs by the second moment for the entire layer."""

    def __init__(self, eps=1e-05, group_num=4):
        super(GroupScaling1DNew, self).__init__()
        self.eps = eps
        self.group_num = group_num

    def extra_repr(self):
        return f'eps={self.eps}, group={self.group_num}'

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: Piki1989/spacetimeformer
module_name: GroupScaling1D
synthetic: false
uuid: 14177
licenses: ["MIT"]
stars: 209
sha: 7e0caf17dd03e5d25e2766c4f7132805779bcc40
repo_link: https://github.com/Piki1989/spacetimeformer/tree/7e0caf17dd03e5d25e2766c4f7132805779bcc40
---

entry_point: FiLMLayerEqualFC

original_triton_python_code:

```python
import math
import torch
import torch.nn.functional as F
from torch import nn

class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None):
        """
        :param in_dim:
        :param out_dim:
        :param bias:
        :param bias_init:
        :param lr_mul: 0.01
        :param activation: None: Linear; fused_leaky_relu
        """
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        if self.activation is not None:
            self.act_layer = nn.LeakyReLU(0.2)
        self.scale = 1 / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
        if self.activation:
            out = self.act_layer(out)
        return out

    def __repr__(self):
        return f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}), activation={self.activation}'

class FiLMLayerEqualFC(nn.Module):

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.layer = EqualLinear(input_dim, hidden_dim)

    def forward(self, x, freq, phase_shift):
        """
        :param x: (b, num_points, d)
        :param freq: (b, d)
        :param phase_shift: (b, d)
        :return:
        """
        x = self.layer(x)
        freq = freq.unsqueeze(1).expand_as(x)
        phase_shift = phase_shift.unsqueeze(1).expand_as(x)
        out = torch.sin(freq * x + phase_shift)
        return out

def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]

def get_init_inputs():
    return [[], {'input_dim': 4, 'hidden_dim': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn.functional as F
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)

@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)

@triton.jit
def triton_poi_fused_add_mul_sin_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.sin(tmp4)
    tl.store(out_ptr0 + x3, tmp5, xmask)

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf0
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_sin_2[grid(64)](primals_4, buf2, primals_5, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf3, primals_4, primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf2

class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None):
        """
        :param in_dim:
        :param out_dim:
        :param bias:
        :param bias_init:
        :param lr_mul: 0.01
        :param activation: None: Linear; fused_leaky_relu
        """
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        if self.activation is not None:
            self.act_layer = nn.LeakyReLU(0.2)
        self.scale = 1 / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
        if self.activation:
            out = self.act_layer(out)
        return out

    def __repr__(self):
        return f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}), activation={self.activation}'

class FiLMLayerEqualFCNew(nn.Module):

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.layer = EqualLinear(input_dim, hidden_dim)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.layer.weight
        primals_2 = self.layer.bias
        primals_3 = input_0
        primals_4 = input_1
        primals_5 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
```

repo_name: PeterouZh/CIPS-3D
module_name: FiLMLayerEqualFC
synthetic: false
uuid: 14178
licenses: ["MIT"]
stars: 308
sha: 9b8bfa0fb23f642af042e150ccd70408f9d137c6
repo_link: https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
---

entry_point: FiLMLayer_PreSin

original_triton_python_code:

```python
import torch
import numpy as np
from torch import nn

class FiLMLayer_PreSin(nn.Module):

    def __init__(self, in_dim, out_dim, style_dim, use_style_fc=True, which_linear=nn.Linear, **kwargs):
        super(FiLMLayer_PreSin, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.use_style_fc = use_style_fc
        self.linear = which_linear(in_dim, out_dim)
        nn.init.uniform_(self.linear.weight, -np.sqrt(9 / in_dim), np.sqrt(9 / in_dim))
        if use_style_fc:
            self.gain_fc = which_linear(style_dim, out_dim)
            self.bias_fc = which_linear(style_dim, out_dim)
            self.gain_fc.weight.data.mul_(0.25)
            self.gain_fc.bias.data.fill_(1)
            self.bias_fc.weight.data.mul_(0.25)
        else:
            self.style_dim = out_dim * 2

    def forward(self, x, style):
        """
        :param x: (b, c) or (b, n, c)
        :param style: (b, c)
        :return:
        """
        if self.use_style_fc:
            gain = self.gain_fc(style)
            bias = self.bias_fc(style)
        else:
            style = rearrange(style, 'b (n c) -> b n c', n=2)
            gain, bias = style.unbind(dim=1)
        if x.dim() == 3:
            gain = rearrange(gain, 'b c -> b 1 c')
            bias = rearrange(bias, 'b c -> b 1 c')
        elif x.dim() == 2:
            pass
        else:
            assert 0
        x = self.linear(x)
        x = torch.sin(x)
        out = gain * x + bias
        return out

    def __repr__(self):
        s = f'{self.__class__.__name__}(in_dim={self.in_dim}, out_dim={self.out_dim}, style_dim={self.style_dim}, use_style_fc={self.use_style_fc}, )'
        return s

def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]

def get_init_inputs():
    return [[], {'in_dim': 4, 'out_dim': 4, 'style_dim': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_add_mul_sin_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_out_ptr0 + x2, xmask)
    tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl_math.sin(tmp1)
    tmp3 = tmp0 * tmp2
    tmp6 = tmp4 + tmp5
    tmp7 = tmp3 + tmp6
    tl.store(in_out_ptr0 + x2, tmp7, xmask)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, primals_6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_7
        del primals_8
        buf3 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_mul_sin_0[grid(16)](buf3, buf0, buf2, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
    return buf3, primals_3, primals_6, buf0, buf2

class FiLMLayer_PreSinNew(nn.Module):

    def __init__(self, in_dim, out_dim, style_dim, use_style_fc=True, which_linear=nn.Linear, **kwargs):
        super(FiLMLayer_PreSinNew, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.use_style_fc = use_style_fc
        self.linear = which_linear(in_dim, out_dim)
        nn.init.uniform_(self.linear.weight, -np.sqrt(9 / in_dim), np.sqrt(9 / in_dim))
        if use_style_fc:
            self.gain_fc = which_linear(style_dim, out_dim)
            self.bias_fc = which_linear(style_dim, out_dim)
            self.gain_fc.weight.data.mul_(0.25)
            self.gain_fc.bias.data.fill_(1)
            self.bias_fc.weight.data.mul_(0.25)
        else:
            self.style_dim = out_dim * 2

    def __repr__(self):
        s = f'{self.__class__.__name__}(in_dim={self.in_dim}, out_dim={self.out_dim}, style_dim={self.style_dim}, use_style_fc={self.use_style_fc}, )'
        return s

    def forward(self, input_0, input_1):
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias
        primals_3 = self.gain_fc.weight
        primals_5 = self.gain_fc.bias
        primals_4 = self.bias_fc.weight
        primals_8 = self.bias_fc.bias
        primals_6 = input_0
        primals_7 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
        return output[0]
```

repo_name: PeterouZh/CIPS-3D
module_name: FiLMLayer_PreSin
synthetic: false
uuid: 14179
licenses: ["MIT"]
stars: 308
sha: 9b8bfa0fb23f642af042e150ccd70408f9d137c6
repo_link: https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
---

entry_point: EQ

original_triton_python_code:

```python
import torch

class EQ(torch.nn.Module):

    def __init__(self):
        super(EQ, self).__init__()

    def forward(self, x, y):
        return x == y

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_eq_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 == tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_eq_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,

class EQNew(torch.nn.Module):

    def __init__(self):
        super(EQNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: PogChamper/torch2trt
module_name: EQ
synthetic: false
uuid: 14180
licenses: ["MIT"]
stars: 3363
sha: 43b12627ec0de4d212efb6d02b07570205085ccc
repo_link: https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
---

entry_point: ConditionalBatchNorm2d

original_triton_python_code:

```python
import torch
import torch.nn as nn
from torch.nn import Parameter

def l2normalize(v, eps=0.0001):
    return v / (v.norm() + eps)

class SpectralNorm(nn.Module):

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        _w = w.view(height, -1)
        for _ in range(self.power_iterations):
            v = l2normalize(torch.matmul(_w.t(), u))
            u = l2normalize(torch.matmul(_w, v))
        sigma = u.dot(_w.mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)

class ConditionalBatchNorm2d(nn.Module):

    def __init__(self, num_features, num_classes, eps=0.0001, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.bn = nn.BatchNorm2d(num_features, affine=False, eps=eps, momentum=momentum)
        self.gamma_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))
        self.beta_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))

    def forward(self, x, y):
        out = self.bn(x)
        gamma = self.gamma_embed(y) + 1
        beta = self.beta_embed(y)
        out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
        return out

def get_inputs():
    return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'num_features': 4, 'num_classes': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_per_fused_linalg_vector_norm_mv_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.load(in_ptr0 + (4 + r0), None)
    tmp5 = tl.load(in_ptr1 + 1)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp9 = tl.load(in_ptr0 + (8 + r0), None)
    tmp10 = tl.load(in_ptr1 + 2)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp14 = tl.load(in_ptr0 + (12 + r0), None)
    tmp15 = tl.load(in_ptr1 + 3)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp3 = tmp0 * tmp2
    tmp7 = tmp4 * tmp6
    tmp8 = tmp3 + tmp7
    tmp12 = tmp9 * tmp11
    tmp13 = tmp8 + tmp12
    tmp17 = tmp14 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp18 * tmp18
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
    tmp22 = tl.sum(tmp20, 1)[:, None]
    tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None)
    tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)

@triton.jit
def triton_per_fused_add_div_dot_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp3 = tl.load(in_ptr2 + 0)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + 1)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp16 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + 2)
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
    tmp22 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr1 + 3)
    tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
    tmp5 = libdevice.sqrt(tmp4)
    tmp6 = 0.0001
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 / tmp7
    tmp9 = tmp0 * tmp8
    tmp13 = tmp12 / tmp7
    tmp14 = tmp10 * tmp13
    tmp15 = tmp9 + tmp14
    tmp19 = tmp18 / tmp7
    tmp20 = tmp16 * tmp19
    tmp21 = tmp15 + tmp20
    tmp25 = tmp24 / tmp7
    tmp26 = tmp22 * tmp25
    tmp27 = tmp21 + tmp26
    tmp28 = tmp27 * tmp27
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
    tmp31 = tl.sum(tmp29, 1)[:, None]
    tmp32 = libdevice.sqrt(tmp31)
    tmp33 = tmp32 + tmp6
    tmp34 = tmp27 / tmp33
    tmp35 = tmp34 * tmp27
    tmp36 = tl.broadcast_to(tmp35, [XBLOCK, RBLOCK])
    tmp38 = tl.sum(tmp36, 1)[:, None]
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None)

@triton.jit
def triton_poi_fused_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 / tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)

@triton.jit
def triton_poi_fused__native_batch_norm_legit_no_training_add_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex // 16
    x4 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x4, None)
    tmp4 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr4 + x3, None, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 - tmp4
    tmp7 = 0.0001
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tmp10 = tl.full([1], 1, tl.int32)
    tmp11 = tmp10 / tmp9
    tmp12 = tmp11 * tmp1
    tmp13 = tmp5 * tmp12
    tmp14 = tmp2 * tmp13
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x4, tmp16, None)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_mv_0[grid(1)](primals_5, primals_4, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_per_fused_add_div_dot_linalg_vector_norm_mv_1[grid(1)](buf4, primals_5, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_2[grid(16)](primals_5, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6)
        buf7 = buf0
        del buf0
        buf8 = buf4
        del buf4
        triton_per_fused_linalg_vector_norm_mv_0[grid(1)](primals_8, primals_7, buf7, buf8, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf10 = buf1
        del buf1
        buf11 = buf10
        del buf10
        triton_per_fused_add_div_dot_linalg_vector_norm_mv_1[grid(1)](buf11, primals_8, buf7, buf8, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf7
        del buf8
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_2[grid(16)](primals_8, buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf11
        buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(buf12, (4, 4), (1, 4), 0), out=buf13)
        buf14 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__native_batch_norm_legit_no_training_add_mul_3[grid(4096)](buf6, primals_1, primals_2, primals_3, buf13, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del buf13
        del buf6
    return (buf14, buf5, buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_7, primals_8, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0))

def l2normalize(v, eps=0.0001):
    return v / (v.norm() + eps)

class SpectralNorm(nn.Module):

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        _w = w.view(height, -1)
        for _ in range(self.power_iterations):
            v = l2normalize(torch.matmul(_w.t(), u))
            u = l2normalize(torch.matmul(_w, v))
        sigma = u.dot(_w.mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)

class ConditionalBatchNorm2dNew(nn.Module):

    def __init__(self, num_features, num_classes, eps=0.0001, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.bn = nn.BatchNorm2d(num_features, affine=False, eps=eps, momentum=momentum)
        self.gamma_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))
        self.beta_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))

    def forward(self, input_0, input_1):
        primals_2 = self.gamma_embed.module.weight_u
        primals_3 = self.gamma_embed.module.weight_v
        primals_5 = self.gamma_embed.module.weight_bar
        primals_4 = self.beta_embed.module.weight_u
        primals_7 = self.beta_embed.module.weight_v
        primals_8 = self.beta_embed.module.weight_bar
        primals_1 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
        return output[0]
```

repo_name: PeterouZh/Omni-GAN-PyTorch
module_name: ConditionalBatchNorm2d
synthetic: false
uuid: 14181
licenses: ["MIT"]
stars: 56
sha: 564a586fed6ce51ef73933d8815d94ce077c4e5c
repo_link: https://github.com/PeterouZh/Omni-GAN-PyTorch/tree/564a586fed6ce51ef73933d8815d94ce077c4e5c
---

entry_point: FloorDiv

original_triton_python_code:

```python
import torch

class FloorDiv(torch.nn.Module):

    def __init__(self):
        super(FloorDiv, self).__init__()

    def forward(self, x, y):
        return x // y

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_floor_divide_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 / tmp1
    tmp3 = libdevice.floor(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_floor_divide_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,

class FloorDivNew(torch.nn.Module):

    def __init__(self):
        super(FloorDivNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: PogChamper/torch2trt
module_name: FloorDiv
synthetic: false
uuid: 14182
licenses: ["MIT"]
stars: 3363
sha: 43b12627ec0de4d212efb6d02b07570205085ccc
repo_link: https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
---

entry_point: FunctionalRelu

original_triton_python_code:

```python
import torch

class FunctionalRelu(torch.nn.Module):

    def forward(self, x):
        return torch.nn.functional.relu(x)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,

class FunctionalReluNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: PogChamper/torch2trt
module_name: FunctionalRelu
synthetic: false
uuid: 14183
licenses: ["MIT"]
stars: 3363
sha: 43b12627ec0de4d212efb6d02b07570205085ccc
repo_link: https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
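The optimised column matches what TorchInductor emits for these modules. A minimal sketch of regenerating such a kernel yourself, assuming PyTorch 2.x with a CUDA build and Triton installed, using the FunctionalRelu record above:

```python
import torch

# Compiling the eager module routes it through TorchInductor,
# which generates Triton kernels like the ones in this dataset.
model = FunctionalRelu().cuda()
compiled = torch.compile(model)

x = torch.rand([4, 4, 4, 4], device="cuda")
out = compiled(x)  # first call triggers codegen and compilation

# Running with the environment variable TORCH_COMPILE_DEBUG=1 dumps the
# generated code (comparable to optimised_triton_code) to a debug directory.
```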
---

entry_point: ATLoss

original_triton_python_code:

```python
import torch
import torch.nn as nn
from torch.nn import functional as F

class ATLoss(nn.Module):
    """
    Module for calculating AT Loss

    :param norm_type (int): Norm to be used in calculating loss
    """

    def __init__(self, norm_type=2):
        super(ATLoss, self).__init__()
        self.p = norm_type

    def forward(self, teacher_output, student_output):
        """
        Forward function

        :param teacher_output (torch.FloatTensor): Prediction made by the teacher model
        :param student_output (torch.FloatTensor): Prediction made by the student model
        """
        A_t = teacher_output[1:]
        A_s = student_output[1:]
        loss = 0.0
        for layerT, layerS in zip(A_t, A_s):
            xT = self.single_at_loss(layerT)
            xS = self.single_at_loss(layerS)
            loss += (xS - xT).pow(self.p).mean()
        return loss

    def single_at_loss(self, activation):
        """
        Function for calculating single attention loss
        """
        return F.normalize(activation.pow(self.p).mean(1).view(activation.size(0), -1))

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_linalg_vector_norm_mean_pow_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (64 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (68 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (72 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (76 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (65 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (69 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (73 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr0 + (77 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (66 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (70 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (74 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (78 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr0 + (67 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr0 + (71 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (75 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr0 + (79 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = tmp12 * tmp12
    tmp15 = tmp14 * tmp14
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp24 / tmp11
    tmp26 = tmp25 * tmp25
    tmp27 = tmp13 + tmp26
    tmp29 = tmp28 * tmp28
    tmp31 = tmp30 * tmp30
    tmp32 = tmp29 + tmp31
    tmp34 = tmp33 * tmp33
    tmp35 = tmp32 + tmp34
    tmp37 = tmp36 * tmp36
    tmp38 = tmp35 + tmp37
    tmp39 = tmp38 / tmp11
    tmp40 = tmp39 * tmp39
    tmp41 = tmp27 + tmp40
    tmp43 = tmp42 * tmp42
    tmp45 = tmp44 * tmp44
    tmp46 = tmp43 + tmp45
    tmp48 = tmp47 * tmp47
    tmp49 = tmp46 + tmp48
    tmp51 = tmp50 * tmp50
    tmp52 = tmp49 + tmp51
    tmp53 = tmp52 / tmp11
    tmp54 = tmp53 * tmp53
    tmp55 = tmp41 + tmp54
    tl.store(out_ptr0 + x0, tmp55, xmask)

@triton.jit
def triton_poi_fused_linalg_vector_norm_mean_pow_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (128 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (132 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (136 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (140 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (129 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (133 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (137 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr0 + (141 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (130 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (134 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (138 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (142 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr0 + (131 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr0 + (135 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (139 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr0 + (143 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = tmp12 * tmp12
    tmp15 = tmp14 * tmp14
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp24 / tmp11
    tmp26 = tmp25 * tmp25
    tmp27 = tmp13 + tmp26
    tmp29 = tmp28 * tmp28
    tmp31 = tmp30 * tmp30
    tmp32 = tmp29 + tmp31
    tmp34 = tmp33 * tmp33
    tmp35 = tmp32 + tmp34
    tmp37 = tmp36 * tmp36
    tmp38 = tmp35 + tmp37
    tmp39 = tmp38 / tmp11
    tmp40 = tmp39 * tmp39
    tmp41 = tmp27 + tmp40
    tmp43 = tmp42 * tmp42
    tmp45 = tmp44 * tmp44
    tmp46 = tmp43 + tmp45
    tmp48 = tmp47 * tmp47
    tmp49 = tmp46 + tmp48
    tmp51 = tmp50 * tmp50
    tmp52 = tmp49 + tmp51
    tmp53 = tmp52 / tmp11
    tmp54 = tmp53 * tmp53
    tmp55 = tmp41 + tmp54
    tl.store(out_ptr0 + x0, tmp55, xmask)

@triton.jit
def triton_poi_fused_linalg_vector_norm_mean_pow_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (192 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (196 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (200 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (204 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (193 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (197 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (201 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr0 + (205 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (194 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (198 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (202 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (206 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr0 + (195 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr0 + (199 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (203 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr0 + (207 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = tmp12 * tmp12
    tmp15 = tmp14 * tmp14
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp24 / tmp11
    tmp26 = tmp25 * tmp25
    tmp27 = tmp13 + tmp26
    tmp29 = tmp28 * tmp28
    tmp31 = tmp30 * tmp30
    tmp32 = tmp29 + tmp31
    tmp34 = tmp33 * tmp33
    tmp35 = tmp32 + tmp34
    tmp37 = tmp36 * tmp36
    tmp38 = tmp35 + tmp37
    tmp39 = tmp38 / tmp11
    tmp40 = tmp39 * tmp39
    tmp41 = tmp27 + tmp40
    tmp43 = tmp42 * tmp42
    tmp45 = tmp44 * tmp44
    tmp46 = tmp43 + tmp45
    tmp48 = tmp47 * tmp47
    tmp49 = tmp46 + tmp48
    tmp51 = tmp50 * tmp50
    tmp52 = tmp49 + tmp51
    tmp53 = tmp52 / tmp11
    tmp54 = tmp53 * tmp53
    tmp55 = tmp41 + tmp54
    tl.store(out_ptr0 + x0, tmp55, xmask)

@triton.jit
def triton_per_fused_add_div_mean_pow_sub_view_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 4
    r1 = rindex // 4
    tmp0 = tl.load(in_ptr0 + (64 + r0 + 16 * r1), None)
    tmp2 = tl.load(in_ptr0 + (68 + r0 + 16 * r1), None)
    tmp5 = tl.load(in_ptr0 + (72 + r0 + 16 * r1), None)
    tmp8 = tl.load(in_ptr0 + (76 + r0 + 16 * r1), None)
    tmp13 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + (64 + r0 + 16 * r1), None)
    tmp20 = tl.load(in_ptr2 + (68 + r0 + 16 * r1), None)
    tmp23 = tl.load(in_ptr2 + (72 + r0 + 16 * r1), None)
    tmp26 = tl.load(in_ptr2 + (76 + r0 + 16 * r1), None)
    tmp30 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr0 + (128 + r0 + 16 * r1), None)
    tmp41 = tl.load(in_ptr0 + (132 + r0 + 16 * r1), None)
    tmp44 = tl.load(in_ptr0 + (136 + r0 + 16 * r1), None)
    tmp47 = tl.load(in_ptr0 + (140 + r0 + 16 * r1), None)
    tmp51 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr2 + (128 + r0 + 16 * r1), None)
    tmp57 = tl.load(in_ptr2 + (132 + r0 + 16 * r1), None)
    tmp60 = tl.load(in_ptr2 + (136 + r0 + 16 * r1), None)
    tmp63 = tl.load(in_ptr2 + (140 + r0 + 16 * r1), None)
    tmp67 = tl.load(in_ptr5 + r1, None, eviction_policy='evict_last')
    tmp76 = tl.load(in_ptr0 + (192 + r0 + 16 * r1), None)
    tmp78 = tl.load(in_ptr0 + (196 + r0 + 16 * r1), None)
    tmp81 = tl.load(in_ptr0 + (200 + r0 + 16 * r1), None)
    tmp84 = tl.load(in_ptr0 + (204 + r0 + 16 * r1), None)
    tmp88 = tl.load(in_ptr6 + r1, None, eviction_policy='evict_last')
    tmp92 = tl.load(in_ptr2 + (192 + r0 + 16 * r1), None)
    tmp94 = tl.load(in_ptr2 + (196 + r0 + 16 * r1), None)
    tmp97 = tl.load(in_ptr2 + (200 + r0 + 16 * r1), None)
    tmp100 = tl.load(in_ptr2 + (204 + r0 + 16 * r1), None)
    tmp104 = tl.load(in_ptr7 + r1, None, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp14 = libdevice.sqrt(tmp13)
    tmp15 = 1e-12
    tmp16 = triton_helpers.maximum(tmp14, tmp15)
    tmp17 = tmp12 / tmp16
    tmp19 = tmp18 * tmp18
    tmp21 = tmp20 * tmp20
    tmp22 = tmp19 + tmp21
    tmp24 = tmp23 * tmp23
    tmp25 = tmp22 + tmp24
    tmp27 = tmp26 * tmp26
    tmp28 = tmp25 + tmp27
    tmp29 = tmp28 / tmp11
    tmp31 = libdevice.sqrt(tmp30)
    tmp32 = triton_helpers.maximum(tmp31, tmp15)
    tmp33 = tmp29 / tmp32
    tmp34 = tmp17 - tmp33
    tmp35 = tmp34 * tmp34
    tmp36 = tl.broadcast_to(tmp35, [XBLOCK, RBLOCK])
    tmp38 = tl.sum(tmp36, 1)[:, None]
    tmp40 = tmp39 * tmp39
    tmp42 = tmp41 * tmp41
    tmp43 = tmp40 + tmp42
    tmp45 = tmp44 * tmp44
    tmp46 = tmp43 + tmp45
    tmp48 = tmp47 * tmp47
    tmp49 = tmp46 + tmp48
    tmp50 = tmp49 / tmp11
    tmp52 = libdevice.sqrt(tmp51)
    tmp53 = triton_helpers.maximum(tmp52, tmp15)
    tmp54 = tmp50 / tmp53
    tmp56 = tmp55 * tmp55
    tmp58 = tmp57 * tmp57
    tmp59 = tmp56 + tmp58
    tmp61 = tmp60 * tmp60
    tmp62 = tmp59 + tmp61
    tmp64 = tmp63 * tmp63
    tmp65 = tmp62 + tmp64
    tmp66 = tmp65 / tmp11
    tmp68 = libdevice.sqrt(tmp67)
    tmp69 = triton_helpers.maximum(tmp68, tmp15)
    tmp70 = tmp66 / tmp69
    tmp71 = tmp54 - tmp70
    tmp72 = tmp71 * tmp71
    tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
    tmp75 = tl.sum(tmp73, 1)[:, None]
    tmp77 = tmp76 * tmp76
    tmp79 = tmp78 * tmp78
    tmp80 = tmp77 + tmp79
    tmp82 = tmp81 * tmp81
    tmp83 = tmp80 + tmp82
    tmp85 = tmp84 * tmp84
    tmp86 = tmp83 + tmp85
    tmp87 = tmp86 / tmp11
    tmp89 = libdevice.sqrt(tmp88)
    tmp90 = triton_helpers.maximum(tmp89, tmp15)
    tmp91 = tmp87 / tmp90
    tmp93 = tmp92 * tmp92
    tmp95 = tmp94 * tmp94
    tmp96 = tmp93 + tmp95
    tmp98 = tmp97 * tmp97
    tmp99 = tmp96 + tmp98
    tmp101 = tmp100 * tmp100
    tmp102 = tmp99 + tmp101
    tmp103 = tmp102 / tmp11
    tmp105 = libdevice.sqrt(tmp104)
    tmp106 = triton_helpers.maximum(tmp105, tmp15)
    tmp107 = tmp103 / tmp106
    tmp108 = tmp91 - tmp107
    tmp109 = tmp108 * tmp108
    tmp110 = tl.broadcast_to(tmp109, [XBLOCK, RBLOCK])
    tmp112 = tl.sum(tmp110, 1)[:, None]
    tmp113 = 16.0
    tmp114 = tmp38 / tmp113
    tmp115 = 0.0
    tmp116 = tmp114 + tmp115
    tmp117 = tmp75 / tmp113
    tmp118 = tmp116 + tmp117
    tmp119 = tmp112 / tmp113
    tmp120 = tmp118 + tmp119
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp120, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_linalg_vector_norm_mean_pow_view_0[grid(4)](arg1_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_linalg_vector_norm_mean_pow_view_0[grid(4)](arg0_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_linalg_vector_norm_mean_pow_view_1[grid(4)](arg1_1, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_linalg_vector_norm_mean_pow_view_1[grid(4)](arg0_1, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_linalg_vector_norm_mean_pow_view_2[grid(4)](arg1_1, buf8, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_linalg_vector_norm_mean_pow_view_2[grid(4)](arg0_1, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf11 = empty_strided_cuda((), (), torch.float32)
        buf12 = buf11
        del buf11
        triton_per_fused_add_div_mean_pow_sub_view_3[grid(1)](buf12, arg1_1, buf0, arg0_1, buf1, buf4, buf5, buf8, buf9, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del buf0
        del buf1
        del buf4
        del buf5
        del buf8
        del buf9
    return buf12,

class ATLossNew(nn.Module):
    """
    Module for calculating AT Loss

    :param norm_type (int): Norm to be used in calculating loss
    """

    def __init__(self, norm_type=2):
        super(ATLossNew, self).__init__()
        self.p = norm_type

    def single_at_loss(self, activation):
        """
        Function for calculating single attention loss
        """
        return F.normalize(activation.pow(self.p).mean(1).view(activation.size(0), -1))

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: PiaCuk/KD_Lib
module_name: ATLoss
synthetic: false
uuid: 14184
licenses: ["MIT"]
stars: 360
sha: 153299d484e4c6b33793749709dbb0f33419f190
repo_link: https://github.com/PiaCuk/KD_Lib/tree/153299d484e4c6b33793749709dbb0f33419f190
---

entry_point: IAdd

original_triton_python_code:

```python
import torch

class IAdd(torch.nn.Module):

    def __init__(self):
        super(IAdd, self).__init__()

    def forward(self, x, y):
        x += y
        return x

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride

@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr1 + x0, tmp2, xmask)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
    return arg0_1,

class IAddNew(torch.nn.Module):

    def __init__(self):
        super(IAddNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: PogChamper/torch2trt
module_name: IAdd
synthetic: false
uuid: 14185
licenses: ["MIT"]
stars: 3363
sha: 43b12627ec0de4d212efb6d02b07570205085ccc
repo_link: https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
---

entry_point: FloorDivAssign

original_triton_python_code:

```python
import torch

class FloorDivAssign(torch.nn.Module):

    def __init__(self):
        super(FloorDivAssign, self).__init__()

    def forward(self, x, y):
        x //= y
        return x

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice

assert_size_stride = torch._C._dynamo.guards.assert_size_stride

@triton.jit
def triton_poi_fused_floor_divide_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 / tmp1
    tmp3 = libdevice.floor(tmp2)
    tl.store(out_ptr1 + x0, tmp3, xmask)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        get_raw_stream(0)
        triton_poi_fused_floor_divide_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
    return arg0_1,

class FloorDivAssignNew(torch.nn.Module):

    def __init__(self):
        super(FloorDivAssignNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: PogChamper/torch2trt
module_name: FloorDivAssign
synthetic: false
uuid: 14186
licenses: ["MIT"]
stars: 3363
sha: 43b12627ec0de4d212efb6d02b07570205085ccc
repo_link: https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
Mod
import torch class Mod(torch.nn.Module): def __init__(self): super(Mod, self).__init__() def forward(self, x, y): return x % y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_remainder_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 % tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 != tmp3 tmp5 = libdevice.signbit(tmp2) if tmp2.dtype is tl.float32 else tmp2 < 0 tmp6 = libdevice.signbit(tmp1) if tmp1.dtype is tl.float32 else tmp1 < 0 tmp7 = tmp5 != tmp6 tmp8 = tmp4 & tmp7 tmp9 = tmp2 + tmp1 tmp10 = tl.where(tmp8, tmp9, tmp2) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_remainder_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class ModNew(torch.nn.Module): def __init__(self): super(ModNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
Mod
false
14,187
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
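The remainder kernel above implements Python-style modulo (sign follows the divisor) on top of the hardware's C-style %: when the raw remainder is nonzero and its sign disagrees with the divisor's, it adds the divisor back. A small eager sketch of exactly that correction:

import torch

x = torch.tensor([ 5.0, -5.0,  5.0, -5.0])
y = torch.tensor([ 3.0,  3.0, -3.0, -3.0])
raw = torch.fmod(x, y)        # C-style: sign follows the dividend
py  = torch.remainder(x, y)   # Python-style: sign follows the divisor
# The kernel's branch: add y back whenever raw != 0 and the signs disagree.
fixed = torch.where((raw != 0) & (torch.signbit(raw) != torch.signbit(y)),
                    raw + y, raw)
assert torch.equal(fixed, py)
print(raw, py)                # tensor([ 2., -2.,  2., -2.])  vs  tensor([ 2.,  1., -1., -2.])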
Div
import torch class Div(torch.nn.Module): def __init__(self): super(Div, self).__init__() def forward(self, x, y): return x / y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class DivNew(torch.nn.Module): def __init__(self): super(DivNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
Div
false
14,188
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
GT
import torch class GT(torch.nn.Module): def __init__(self): super(GT, self).__init__() def forward(self, x, y): return x > y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_gt_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class GTNew(torch.nn.Module): def __init__(self): super(GTNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
GT
false
14,189
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
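Comparison records like this one (and LT below) lower to kernels whose output buffer is allocated as torch.bool, matching eager semantics. A short sketch of the dtype and the complement relation:

import torch

x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
m = x > y
assert m.dtype == torch.bool                # matches the torch.bool buffer above
assert torch.equal(~m & (x != y), x < y)    # holds for any non-NaN inputs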
ScaleNorm
import torch from torch import nn class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, x): n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps ) * self.scale x = x / n * self.g return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + 0) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tmp0 / tmp16 tmp20 = tmp17 * tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)]( primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class ScaleNormNew(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, input_0): primals_2 = self.g primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Piki1989/spacetimeformer
ScaleNorm
false
14,190
[ "MIT" ]
209
7e0caf17dd03e5d25e2766c4f7132805779bcc40
https://github.com/Piki1989/spacetimeformer/tree/7e0caf17dd03e5d25e2766c4f7132805779bcc40
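In the fused ScaleNorm kernel, the last-dim L2 norm is recomputed inline per element (four loads, sum of squares, sqrt), clamped by eps = 1e-05, and multiplied by the constant scale dim ** -0.5, which for dim=4 is the 0.5 appearing as tmp15. A minimal eager sketch of the same math:

import torch

x = torch.rand(4, 4, 4, 4)
g, eps, dim = torch.ones(1), 1e-05, 4
# Norm over the last dim, clamped, scaled by dim**-0.5 == 0.5 for dim == 4.
n = torch.linalg.vector_norm(x, dim=-1, keepdim=True).clamp(min=eps) * dim ** -0.5
out = x / n * g
# The module's own formulation via Tensor.norm gives the same result.
ref_n = x.norm(dim=-1, keepdim=True).clamp(min=eps) * dim ** -0.5
assert torch.allclose(out, x / ref_n * g)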
ISub
import torch class ISub(torch.nn.Module): def __init__(self): super(ISub, self).__init__() def forward(self, x, y): x -= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class ISubNew(torch.nn.Module): def __init__(self): super(ISubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
ISub
false
14,191
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
ModAssign
import torch class ModAssign(torch.nn.Module): def __init__(self): super(ModAssign, self).__init__() def forward(self, x, y): x %= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_remainder_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 % tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 != tmp3 tmp5 = libdevice.signbit(tmp2) if tmp2.dtype is tl.float32 else tmp2 < 0 tmp6 = libdevice.signbit(tmp1) if tmp1.dtype is tl.float32 else tmp1 < 0 tmp7 = tmp5 != tmp6 tmp8 = tmp4 & tmp7 tmp9 = tmp2 + tmp1 tmp10 = tl.where(tmp8, tmp9, tmp2) tl.store(out_ptr1 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_remainder_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class ModAssignNew(torch.nn.Module): def __init__(self): super(ModAssignNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
ModAssign
false
14,192
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
MaxPool1D
import torch class MaxPool1D(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False): super().__init__() self.kernel_size = kernel_size self.stride = stride self.padding = padding self.ceil_mode = ceil_mode def forward(self, x): return torch.nn.functional.max_pool1d(x, self.kernel_size, stride= self.stride, padding=self.padding, ceil_mode=self.ceil_mode) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1), (1, 1), 0), class MaxPool1DNew(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False): super().__init__() self.kernel_size = kernel_size self.stride = stride self.padding = padding self.ceil_mode = ceil_mode def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
MaxPool1D
false
14,193
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
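With an unbatched (channels, length) input and kernel_size equal to the full length, the pooling window is unrolled above into three pairwise maximums, so the whole op reduces to a max over the last dimension. A quick eager check of that equivalence:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4)                        # (channels, length), unbatched 1D input
pooled = F.max_pool1d(x, kernel_size=4)     # shape (4, 1)
assert torch.equal(pooled, x.max(dim=-1, keepdim=True).values)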
MinElementwise
import torch class MinElementwise(torch.nn.Module): def forward(self, x, y): return torch.min(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_minimum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.minimum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_minimum_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class MinElementwiseNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
MinElementwise
false
14,194
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
IMul
import torch class IMul(torch.nn.Module): def __init__(self): super(IMul, self).__init__() def forward(self, x, y): x *= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class IMulNew(torch.nn.Module): def __init__(self): super(IMulNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
IMul
false
14,195
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
IDiv
import torch class IDiv(torch.nn.Module): def __init__(self): super(IDiv, self).__init__() def forward(self, x, y): x /= y return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 / tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, arg1_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 return arg0_1, class IDivNew(torch.nn.Module): def __init__(self): super(IDivNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
IDiv
false
14,196
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
Mul
import torch class Mul(torch.nn.Module): def __init__(self): super(Mul, self).__init__() def forward(self, x, y): return x * y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class MulNew(torch.nn.Module): def __init__(self): super(MulNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
Mul
false
14,197
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
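Comparing this out-of-place Mul with the IMul/IDiv records above: the out-of-place version allocates a fresh buf0 and frees both inputs, while the in-place variants reuse arg0_1 as the output and only free arg1_1. The observable difference in eager mode:

import torch

x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
z = x * y                                 # out-of-place: new storage, like buf0 above
assert z.data_ptr() != x.data_ptr()
x_ptr = x.data_ptr()
x *= y                                    # in-place: result lands in x's storage (IMul pattern)
assert x.data_ptr() == x_ptr and torch.equal(x, z)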
ModConst
import torch class ModConst(torch.nn.Module): def __init__(self): super(ModConst, self).__init__() def forward(self, x): return x % 2.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_remainder_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 % tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 != tmp3 tmp5 = libdevice.signbit(tmp2) if tmp2.dtype is tl.float32 else tmp2 < 0 tmp6 = libdevice.signbit(tmp1) if tmp1.dtype is tl.float32 else tmp1 < 0 tmp7 = tmp5 != tmp6 tmp8 = tmp4 & tmp7 tmp9 = tmp2 + tmp1 tmp10 = tl.where(tmp8, tmp9, tmp2) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_remainder_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ModConstNew(torch.nn.Module): def __init__(self): super(ModConstNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
ModConst
false
14,198
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
MaxElementwise
import torch class MaxElementwise(torch.nn.Module): def forward(self, x, y): return torch.max(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_maximum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_maximum_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class MaxElementwiseNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
MaxElementwise
false
14,199
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
LT
import torch class LT(torch.nn.Module): def __init__(self): super(LT, self).__init__() def forward(self, x, y): return x < y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_lt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 < tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_lt_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class LTNew(torch.nn.Module): def __init__(self): super(LTNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
LT
false
14,200
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
FloorDivConst
import torch class FloorDivConst(torch.nn.Module): def __init__(self): super(FloorDivConst, self).__init__() def forward(self, x): return x // 2.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_floor_divide_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_floor_divide_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FloorDivConstNew(torch.nn.Module): def __init__(self): super(FloorDivConstNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
FloorDivConst
false
14,202
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
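Note the strength reduction here: dividing by the constant 2.0 is compiled to multiplication by 0.5 (tmp1 = 0.5) followed by floor, whereas the tensor-tensor FloorDivAssign record above keeps a true division. For a power-of-two divisor the rewrite is exact in IEEE-754 arithmetic:

import torch

x = torch.rand(4, 4, 4, 4)
out = x // 2.0
# floor(x * 0.5) is bit-identical to floor(x / 2.0): multiplying by 0.5 and
# dividing by 2 are the same binary operation for finite floats.
assert torch.equal(out, torch.floor(x * 0.5))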
FunctionalRelu6
import torch class FunctionalRelu6(torch.nn.Module): def forward(self, x): return torch.nn.functional.relu6(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 6.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class FunctionalRelu6New(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
FunctionalRelu6
false
14,203
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
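relu6 is lowered to a hardtanh-style clamp, max(x, 0) followed by min(·, 6), exactly the maximum/minimum pair in the kernel above. A short eager parity check exercising both clamp ends:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4) * 10           # values well outside [0, 6]
assert torch.equal(F.relu6(x), F.hardtanh(x, 0.0, 6.0))
assert torch.equal(F.relu6(x), x.clamp(min=0.0, max=6.0))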
SequentialPolarizedSelfAttention
import torch from torch import nn class SequentialPolarizedSelfAttention(nn.Module): def __init__(self, channel=512): super().__init__() self.ch_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.ch_wq = nn.Conv2d(channel, 1, kernel_size=(1, 1)) self.softmax_channel = nn.Softmax(1) self.softmax_spatial = nn.Softmax(-1) self.ch_wz = nn.Conv2d(channel // 2, channel, kernel_size=(1, 1)) self.ln = nn.LayerNorm(channel) self.sigmoid = nn.Sigmoid() self.sp_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.sp_wq = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.agp = nn.AdaptiveAvgPool2d((1, 1)) def forward(self, x): b, c, h, w = x.size() channel_wv = self.ch_wv(x) channel_wq = self.ch_wq(x) channel_wv = channel_wv.reshape(b, c // 2, -1) channel_wq = channel_wq.reshape(b, -1, 1) channel_wq = self.softmax_channel(channel_wq) channel_wz = torch.matmul(channel_wv, channel_wq).unsqueeze(-1) channel_weight = self.sigmoid(self.ln(self.ch_wz(channel_wz). reshape(b, c, 1).permute(0, 2, 1))).permute(0, 2, 1).reshape(b, c, 1, 1) channel_out = channel_weight * x spatial_wv = self.sp_wv(channel_out) spatial_wq = self.sp_wq(channel_out) spatial_wq = self.agp(spatial_wq) spatial_wv = spatial_wv.reshape(b, c // 2, -1) spatial_wq = spatial_wq.permute(0, 2, 3, 1).reshape(b, 1, c // 2) spatial_wq = self.softmax_spatial(spatial_wq) spatial_wz = torch.matmul(spatial_wq, spatial_wv) spatial_weight = self.sigmoid(spatial_wz.reshape(b, 1, h, w)) spatial_out = spatial_weight * channel_out return spatial_out def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_red_fused__softmax_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) _tmp5 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = triton_helpers.maximum(_tmp5, tmp4) _tmp5 = tl.where(rmask & xmask, tmp6, _tmp5) tmp5 = triton_helpers.max2(_tmp5, 1)[:, None] tmp8 = tl.load(in_ptr1 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) _tmp14 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp7 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp7 + tmp9 tmp11 = tmp10 - tmp5 tmp12 = tl_math.exp(tmp11) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = _tmp14 + tmp13 _tmp14 = tl.where(rmask & xmask, tmp15, _tmp14) tmp14 = tl.sum(_tmp14, 1)[:, None] tmp17 = tl.load(in_ptr1 + 0) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp16 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp19 = tmp16 + tmp18 tmp20 = tmp19 - tmp5 tmp21 = tl_math.exp(tmp20) tmp22 = tmp21 / tmp14 tl.store(out_ptr2 + (r1 + 4096 * x0), tmp22, rmask & xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1048576 * y1), None, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, None) @triton.jit def triton_per_fused_convolution_native_layer_norm_sigmoid_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel ): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 512.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tmp21 = tmp2 - tmp10 tmp22 = tmp21 * tmp20 tmp24 = tmp22 * tmp23 tmp26 = tmp24 + tmp25 tmp27 = tl.sigmoid(tmp26) tl.store(in_out_ptr0 + (r1 + 512 * x0), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp20, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp27, None) tl.store(out_ptr0 + x0, tmp10, None) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x2 = xindex // 2097152 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x2), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, None) @triton.jit def triton_red_fused_convolution_mean_5(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 256 x1 = xindex // 256 tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') _tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * r2 + 32768 * x1), rmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = _tmp4 + tmp3 _tmp4 = tl.where(rmask, tmp5, _tmp4) tmp4 = tl.sum(_tmp4, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_per_fused_convolution_mean_6(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 1024 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 256 x1 = xindex // 256 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * r2 + 8192 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def 
triton_per_fused__softmax_7(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = 4096.0 tmp2 = tmp0 / tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0)) tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = tmp7 / tmp10 tl.store(out_ptr2 + (r1 + 256 * x0), tmp11, None) @triton.jit def triton_poi_fused_mul_sigmoid_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y1 = yindex // 512 y0 = yindex % 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y1), None, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr1 + (y0 + 512 * x2 + 2097152 * y1), None, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_2, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (1, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (512, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512,), (1,)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_13, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_1, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 1, 64, 1)) buf5 = empty_strided_cuda((4, 4096, 1), (4096, 1, 1), torch.float32) triton_red_fused__softmax_1[grid(4)](buf2, primals_5, buf5, 4, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_2[grid(1024, 4096)](buf1, primals_3, buf6, 1024, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf1 del primals_3 buf7 = empty_strided_cuda((4, 256, 1), (256, 1, 1), 
torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 256, 4096), ( 1048576, 4096, 1), 0), buf5, out=buf7) buf8 = extern_kernels.convolution(reinterpret_tensor(buf7, (4, 256, 1, 1), (256, 1, 1, 1), 0), primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 512, 1, 1), (512, 1, 1, 1)) buf9 = reinterpret_tensor(buf8, (4, 512, 1, 1), (512, 1, 512, 512), 0) del buf8 buf10 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf13 = reinterpret_tensor(buf11, (4, 1, 1), (1, 1, 1), 0) del buf11 buf14 = empty_strided_cuda((4, 1, 512), (512, 2048, 1), torch.float32) triton_per_fused_convolution_native_layer_norm_sigmoid_3[grid(4)](buf9, buf13, primals_7, primals_8, primals_9, buf10, buf14, 4, 512, num_warps=4, num_stages=1) del primals_7 buf15 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32) triton_poi_fused_mul_4[grid(8388608)](buf14, buf0, buf15, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del buf14 buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf17 = extern_kernels.convolution(buf15, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf18 = empty_strided_cuda((4, 256, 1, 1, 32), (8192, 1, 32768, 32768, 256), torch.float32) triton_red_fused_convolution_mean_5[grid(32768)](buf17, primals_13, buf18, 32768, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) del primals_13 buf19 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024), torch.float32) triton_per_fused_convolution_mean_6[grid(1024)](buf18, buf19, 1024, 32, XBLOCK=128, num_warps=8, num_stages=1) del buf18 buf22 = empty_strided_cuda((4, 1, 256), (256, 256, 1), torch.float32) triton_per_fused__softmax_7[grid(4)](buf19, buf22, 4, 256, num_warps=2, num_stages=1) del buf19 buf23 = reinterpret_tensor(buf17, (4, 256, 64, 64), (1048576, 4096, 64, 1), 0) del buf17 triton_poi_fused_convolution_2[grid(1024, 4096)](buf16, primals_11, buf23, 1024, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf16 del primals_11 buf24 = reinterpret_tensor(buf2, (4, 1, 4096), (4096, 4096, 1), 0) del buf2 extern_kernels.bmm(buf22, reinterpret_tensor(buf23, (4, 256, 4096), (1048576, 4096, 1), 0), out=buf24) buf25 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) triton_poi_fused_mul_sigmoid_8[grid(2048, 4096)](buf24, buf15, buf25, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) return (buf25, buf0, primals_2, primals_4, primals_6, primals_8, primals_9, primals_10, primals_12, buf5, reinterpret_tensor(buf7, ( 4, 256, 1, 1), (256, 1, 1, 1), 0), buf9, buf10, buf13, buf15, buf22, buf24, reinterpret_tensor(buf23, (4, 4096, 256), (1048576, 1, 4096), 0), reinterpret_tensor(buf6, (4, 4096, 256), (1048576, 1, 4096), 0)) class SequentialPolarizedSelfAttentionNew(nn.Module): def __init__(self, channel=512): super().__init__() self.ch_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.ch_wq = nn.Conv2d(channel, 1, kernel_size=(1, 1)) self.softmax_channel = nn.Softmax(1) self.softmax_spatial = nn.Softmax(-1) self.ch_wz = nn.Conv2d(channel // 2, channel, kernel_size=(1, 1)) self.ln = 
nn.LayerNorm(channel) self.sigmoid = nn.Sigmoid() self.sp_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.sp_wq = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.agp = nn.AdaptiveAvgPool2d((1, 1)) def forward(self, input_0): primals_2 = self.ch_wv.weight primals_3 = self.ch_wv.bias primals_4 = self.ch_wq.weight primals_5 = self.ch_wq.bias primals_6 = self.ch_wz.weight primals_7 = self.ch_wz.bias primals_8 = self.ln.weight primals_9 = self.ln.bias primals_10 = self.sp_wv.weight primals_11 = self.sp_wv.bias primals_12 = self.sp_wq.weight primals_13 = self.sp_wq.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
Nitin-Mane/External-Attention-pytorch
SequentialPolarizedSelfAttention
false
14,204
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
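A small shape walkthrough of the channel-attention stage that the first bmm in the compiled graph performs; the tiny sizes and random tensors below stand in for the 1x1 convolutions ch_wv/ch_wq and are illustrative assumptions only, not the module's actual projections.

import torch

b, c, h, w = 2, 8, 4, 4
wv = torch.randn(b, c // 2, h * w)                   # stands in for ch_wv(x).reshape(b, c//2, -1)
wq = torch.softmax(torch.randn(b, h * w, 1), dim=1)  # ch_wq(x).reshape(b, -1, 1) + Softmax(1)
z = torch.bmm(wv, wq)                                # (b, c//2, 1): per-channel spatial summary
assert z.shape == (b, c // 2, 1)
# ch_wz then lifts c//2 back to c; LayerNorm + sigmoid turn the result into a
# (b, c, 1, 1) channel weight that scales x before the spatial-attention stage.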
RMulInt
import torch class RMulInt(torch.nn.Module): def __init__(self): super(RMulInt, self).__init__() def forward(self, x): return 10 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 10.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RMulIntNew(torch.nn.Module): def __init__(self): super(RMulIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
RMulInt
false
14,205
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
LinearModel
import torch from torch import nn class LinearModel(nn.Module): def __init__(self, context_points: 'int'): super().__init__() self.window = context_points self.linear = nn.Linear(context_points, 1) def forward(self, y_c): _bs, _length, d_y = y_c.shape inp = y_c[:, -self.window:, :] inp = torch.cat(inp.chunk(d_y, dim=-1), dim=0) baseline = self.linear(inp.squeeze(-1)) baseline = torch.cat(baseline.chunk(d_y, dim=0), dim=-1).unsqueeze(1) return baseline def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'context_points': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 + x1), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (8 + x1), tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr0 + (12 + x1), tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 
return reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0) class LinearModelNew(nn.Module): def __init__(self, context_points: 'int'): super().__init__() self.window = context_points self.linear = nn.Linear(context_points, 1) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Piki1989/spacetimeformer
LinearModel
false
14,206
[ "MIT" ]
209
7e0caf17dd03e5d25e2766c4f7132805779bcc40
https://github.com/Piki1989/spacetimeformer/tree/7e0caf17dd03e5d25e2766c4f7132805779bcc40
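The chunk/cat dance in LinearModel stacks the d_y variables along the batch so one shared Linear(context_points, 1) is applied to every univariate series, then splits the outputs back. A sketch showing the equivalent direct einsum, with shapes taken from the record's get_inputs:

import torch
from torch import nn

bs, length, d_y = 4, 4, 4
y_c = torch.rand(bs, length, d_y)
linear = nn.Linear(length, 1)

# Model's path: stack variables along batch, apply the shared linear, re-split.
inp = torch.cat(y_c.chunk(d_y, dim=-1), dim=0).squeeze(-1)                 # (bs*d_y, length)
baseline = torch.cat(linear(inp).chunk(d_y, dim=0), dim=-1).unsqueeze(1)   # (bs, 1, d_y)

# Equivalent direct form: one weighted sum over time, applied per variable.
direct = (torch.einsum('blj,l->bj', y_c, linear.weight[0]) + linear.bias).unsqueeze(1)
assert torch.allclose(baseline, direct, atol=1e-6)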
NotEqualConst
import torch class NotEqualConst(torch.nn.Module): def __init__(self): super(NotEqualConst, self).__init__() def forward(self, x): return x != 13.62 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 13.62 tmp2 = tmp0 != tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_ne_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NotEqualConstNew(torch.nn.Module): def __init__(self): super(NotEqualConstNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
NotEqualConst
false
14,207
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
RAddInt
import torch class RAddInt(torch.nn.Module): def __init__(self): super(RAddInt, self).__init__() def forward(self, x): return 1 + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RAddIntNew(torch.nn.Module): def __init__(self): super(RAddIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
RAddInt
false
14,208
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
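The Python int literal 1 is promoted to the float constant 1.0 inside the kernel (tmp1 = 1.0), following type-promotion rules that keep the result in the tensor's dtype. A trivial eager confirmation:

import torch

x = torch.rand(4, 4, 4, 4)
out = 1 + x
assert out.dtype == torch.float32       # int scalar promotes into the tensor dtype
assert torch.equal(out, x + 1.0)        # same constant the kernel bakes in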
SoftKLDivLoss
import torch from torch import Tensor from torch.nn import functional as F class SoftKLDivLoss(torch.nn.Module): def __init__(self, temp=20.0, reduction='batchmean', log_target=False ) ->None: super(SoftKLDivLoss, self).__init__() self.temp = temp self.reduction = reduction self.log_target = log_target def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor: soft_input = torch.log_softmax(input / self.temp, dim=-1) soft_target = torch.softmax(target / self.temp, dim=-1) return self.temp ** 2 * F.kl_div(soft_input, soft_target, reduction =self.reduction, log_target=self.log_target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.05
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.05
    tmp16 = tmp14 * tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)


@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
        in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
        RBLOCK: tl.constexpr):
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = rindex // 4
        tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy='evict_last',
            other=0.0)
        tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp3 = tmp1 + tmp2
        tmp5 = tmp3 + tmp4
        tmp7 = tmp5 + tmp6
        tmp8 = tmp0 / tmp7
        tmp9 = libdevice.isnan(tmp8).to(tl.int1)
        tmp10 = 0.0
        tmp11 = tmp8 == tmp10
        tmp12 = tl_math.log(tmp8)
        tmp13 = tmp8 * tmp12
        tmp14 = tl.where(tmp11, tmp10, tmp13)
        tmp15 = float('nan')
        tmp16 = tl.where(tmp9, tmp15, tmp14)
        tmp19 = tl_math.exp(tmp18)
        tmp21 = tl_math.exp(tmp20)
        tmp22 = tmp19 + tmp21
        tmp24 = tl_math.exp(tmp23)
        tmp25 = tmp22 + tmp24
        tmp27 = tl_math.exp(tmp26)
        tmp28 = tmp25 + tmp27
        tmp29 = tl_math.log(tmp28)
        tmp30 = tmp17 - tmp29
        tmp31 = tmp8 * tmp30
        tmp32 = tmp16 - tmp31
        tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
        tmp35 = _tmp34 + tmp33
        _tmp34 = tl.where(rmask, tmp35, _tmp34)
    tmp34 = tl.sum(_tmp34, 1)[:, None]
    tmp36 = 0.25
    tmp37 = tmp34 * tmp36
    tmp38 = 400.0
    tmp39 = tmp37 * tmp38
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)](
            buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8,
            num_stages=1)
        del buf0
        del buf2
    return buf4,


class SoftKLDivLossNew(torch.nn.Module):

    def __init__(self, temp=20.0, reduction='batchmean', log_target=False
            ) ->None:
        super(SoftKLDivLossNew, self).__init__()
        self.temp = temp
        self.reduction = reduction
        self.log_target = log_target

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PiaCuk/KD_Lib
SoftKLDivLoss
false
14,209
[ "MIT" ]
360
153299d484e4c6b33793749709dbb0f33419f190
https://github.com/PiaCuk/KD_Lib/tree/153299d484e4c6b33793749709dbb0f33419f190
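The magic numbers in the fused kernels above follow directly from the module's defaults: 0.05 is 1 / temp with temp=20.0, 400.0 is temp ** 2, and 0.25 is the batchmean divisor 1 / input.size(0) for the batch of 4. A small eager sketch (my own, not from the dataset) reproducing those constants:

# Sketch: reproduce the constants hard-coded into the reduction kernel.
import torch
import torch.nn.functional as F

temp = 20.0
x = torch.rand([4, 4, 4, 4])
y = torch.rand([4, 4, 4, 4])
soft_input = torch.log_softmax(x / temp, dim=-1)   # the 1/temp == 0.05 scale
soft_target = torch.softmax(y / temp, dim=-1)
loss = temp ** 2 * F.kl_div(soft_input, soft_target, reduction='batchmean')
# 'batchmean' divides the summed KL by x.size(0) == 4 (the 0.25 factor),
# and temp ** 2 == 400.0 is the final scale applied in the kernel.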
TensorClampMin
import torch


class TensorClampMin(torch.nn.Module):

    def forward(self, x):
        return x.clamp_min(-0.1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -0.1
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_min_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TensorClampMinNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
TensorClampMin
false
14,210
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
RSubFloat
import torch


class RSubFloat(torch.nn.Module):

    def __init__(self):
        super(RSubFloat, self).__init__()

    def forward(self, x):
        return 1.0 - x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RSubFloatNew(torch.nn.Module):

    def __init__(self):
        super(RSubFloatNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
RSubFloat
false
14,211
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
Extractor
from torch.nn import Module
import torch
from torch import Tensor
from typing import Optional
from typing import Tuple
import torch.nn.functional as F
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention


class Extractor(Module):
    """Convolutional Transformer Decoder Layer"""

    def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int',
            dropout=0.1, no_residual=False):
        super(Extractor, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
        self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.dropout3 = Dropout(dropout)
        self.no_residual = no_residual

    def forward(self, tgt: 'Tensor', memory: 'Tensor',
            tgt_mask: 'Optional[Tensor]'=None,
            memory_mask: 'Optional[Tensor]'=None,
            tgt_key_padding_mask: 'Optional[Tensor]'=None,
            memory_key_padding_mask: 'Optional[Tensor]'=None
            ) ->Tuple[Tensor, Optional[Tensor]]:
        tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
            key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2, attn = self.cross_attn(tgt, memory, memory,
            attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
        if self.no_residual:
            tgt = self.dropout2(tgt2)
        else:
            tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = tgt.transpose(0, 1).transpose(1, 2)
        tgt2 = self.conv2(F.relu(self.conv1(tgt2)))
        tgt2 = tgt2.transpose(1, 2).transpose(0, 1)
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt, attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4, 'd_hid': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 8 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (4 + x0 + 4 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x2 % 4, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_mean_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_convolution_13(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
        xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x3 = xindex
    y0 = yindex
    x1 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x3 + 16 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0 + 4 * x3), xmask & ymask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(out_ptr0 + (x3 + 16 * y0), tmp4, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (12, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_9, (12, 4), (4, 1))
    assert_size_stride(primals_10, (12,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 4, 9), (36, 9, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_18, (4,), (1,))
    assert_size_stride(primals_19, (4,), (1,))
    assert_size_stride(primals_20, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(192)](buf0, primals_2, buf1, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
        triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (16, 1, 4), (1,
            0, 16), 64), out=buf3)
        buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf1, (16, 4, 1), (1,
            16, 0), 128), out=buf6)
        buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        triton_poi_fused_clone_4[grid(4, 16)](buf6, buf7, 4, 16, XBLOCK=16,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0)
        del buf6
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf8)
        del primals_5
        buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf8,
            buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf8,
            buf9, buf10, primals_6, primals_7, buf11, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
        buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12)
        buf13 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 8), (1, 4), 16), out=buf13)
        buf14 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_7[grid(128)](buf13, primals_10, buf14, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf13
        buf15 = reinterpret_tensor(buf12, (16, 4, 1), (1, 16, 64), 0)
        del buf12
        triton_poi_fused_mul_8[grid(64)](buf15, primals_10, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_10
        buf16 = buf4
        del buf4
        extern_kernels.bmm(buf15, reinterpret_tensor(buf14, (16, 1, 4), (1,
            0, 16), 0), out=buf16)
        buf17 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf16, buf17, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf18 = buf16
        del buf16
        triton_poi_fused__softmax_3[grid(256)](buf17, buf18, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf17
        buf19 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf18, reinterpret_tensor(buf14, (16, 4, 1), (1,
            16, 0), 64), out=buf19)
        buf20 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        triton_poi_fused_clone_4[grid(4, 16)](buf19, buf20, 4, 16,
            XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        buf21 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0)
        del buf19
        extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf21)
        buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_mean_9[grid(64)](buf18, buf22, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf23 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
        del buf21
        triton_poi_fused_add_10[grid(64)](buf23, buf11, primals_12, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_12
        buf24 = buf9
        del buf9
        buf25 = buf10
        del buf10
        triton_poi_fused_native_layer_norm_11[grid(16)](buf23, buf24,
            buf25, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_12[grid(64)](buf23, buf24,
            buf25, primals_13, primals_14, buf26, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_14
        buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_convolution_13[grid(16, 4)](buf26, buf27, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf28 = extern_kernels.convolution(buf27, primals_15, stride=(1,),
            padding=(4,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf28, (4, 4, 4), (16, 4, 1))
        buf29 = buf28
        del buf28
        triton_poi_fused_convolution_relu_14[grid(64)](buf29, primals_16,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_16
        buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf30, (4, 4, 4), (16, 4, 1))
        buf31 = buf27
        del buf27
        triton_poi_fused_add_15[grid(4, 16)](buf26, buf30, primals_18,
            buf31, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        del primals_18
        buf32 = buf25
        del buf25
        buf33 = buf24
        del buf24
        triton_poi_fused_native_layer_norm_11[grid(16)](buf31, buf32,
            buf33, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf34 = buf30
        del buf30
        triton_poi_fused_native_layer_norm_12[grid(64)](buf31, buf32,
            buf33, primals_19, primals_20, buf34, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf32
        del buf33
        del primals_20
    return (buf34, buf22, primals_1, primals_6, primals_13, primals_15,
        primals_17, primals_19, buf5, reinterpret_tensor(buf7, (16, 4), (4,
        1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf18,
        reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf23,
        reinterpret_tensor(buf26, (4, 4, 4), (4, 1, 16), 0), buf29, buf31,
        primals_11, reinterpret_tensor(buf14, (16, 1, 4), (1, 1, 16), 64),
        reinterpret_tensor(buf15, (16, 1, 4), (1, 1, 16), 0),
        reinterpret_tensor(buf14, (16, 4, 1), (1, 16, 1), 0),
        reinterpret_tensor(primals_9, (4, 4), (4, 1), 0), primals_4,
        reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 128),
        reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0),
        reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 64))


class ExtractorNew(Module):
    """Convolutional Transformer Decoder Layer"""

    def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int',
            dropout=0.1, no_residual=False):
        super(ExtractorNew, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
        self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.dropout3 = Dropout(dropout)
        self.no_residual = no_residual

    def forward(self, input_0, input_1):
        primals_3 = self.self_attn.in_proj_weight
        primals_2 = self.self_attn.in_proj_bias
        primals_4 = self.self_attn.out_proj.weight
        primals_5 = self.self_attn.out_proj.bias
        primals_9 = self.cross_attn.in_proj_weight
        primals_10 = self.cross_attn.in_proj_bias
        primals_11 = self.cross_attn.out_proj.weight
        primals_6 = self.cross_attn.out_proj.bias
        primals_15 = self.conv1.weight
        primals_7 = self.conv1.bias
        primals_17 = self.conv2.weight
        primals_12 = self.conv2.bias
        primals_13 = self.norm1.weight
        primals_14 = self.norm1.bias
        primals_16 = self.norm2.weight
        primals_18 = self.norm2.bias
        primals_19 = self.norm3.weight
        primals_20 = self.norm3.bias
        primals_1 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20])
        return output[0], output[1]
OlegJakushkin/FragmentVC
Extractor
false
14,212
[ "MIT" ]
136
8aa673157b855bf3b67f06fdb6eb4b2a12ed0005
https://github.com/OlegJakushkin/FragmentVC/tree/8aa673157b855bf3b67f06fdb6eb4b2a12ed0005
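Two things are worth noting about the Extractor pair above: the generated call contains no dropout kernels, consistent with the module having been traced with dropout inactive, and it returns both the decoder output and the head-averaged cross-attention weights (buf22, computed by triton_poi_fused_mean_9). A rough shape-check sketch (my own, not from the dataset), assuming a CUDA device and the ExtractorNew definition above:

# Sketch: shape check for the (output, attention) pair.
# Inputs follow the seq-first (seq_len, batch, d_model) MultiheadAttention layout.
import torch

model = ExtractorNew(d_model=4, nhead=4, d_hid=4).cuda()
tgt = torch.rand([4, 4, 4], device='cuda')
memory = torch.rand([4, 4, 4], device='cuda')
out, attn = model(tgt, memory)
assert out.shape == (4, 4, 4)    # same shape as tgt
assert attn.shape == (4, 4, 4)   # (batch, tgt_len, src_len), averaged over heads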
RMulFloat
import torch


class RMulFloat(torch.nn.Module):

    def __init__(self):
        super(RMulFloat, self).__init__()

    def forward(self, x):
        return 10.0 * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 10.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RMulFloatNew(torch.nn.Module):

    def __init__(self):
        super(RMulFloatNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
RMulFloat
false
14,213
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TensorClampMax
import torch


class TensorClampMax(torch.nn.Module):

    def forward(self, x):
        return x.clamp_max(0.1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.1
    tmp2 = triton_helpers.minimum(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_max_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TensorClampMaxNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
TensorClampMax
false
14,214
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TorchDiv
import torch


class TorchDiv(torch.nn.Module):

    def __init__(self):
        super(TorchDiv, self).__init__()

    def forward(self, x, y):
        return torch.div(x, y)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 / tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class TorchDivNew(torch.nn.Module):

    def __init__(self):
        super(TorchDivNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
TorchDiv
false
14,215
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
RDivInt
import torch


class RDivInt(torch.nn.Module):

    def __init__(self):
        super(RDivInt, self).__init__()

    def forward(self, x):
        return 100 / x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp1 / tmp0
    tmp3 = 100.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RDivIntNew(torch.nn.Module):

    def __init__(self):
        super(RDivIntNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
RDivInt
false
14,216
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
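Inductor lowers `100 / x` to a reciprocal followed by a multiply, i.e. `(1 / x) * 100.0`, which agrees with eager division up to floating-point rounding. A hedged eager sketch (my own, not from the dataset):

# Sketch: reciprocal-then-multiply matches true division to within rounding.
import torch

x = torch.rand([4, 4, 4, 4]) + 0.5     # keep values away from zero
assert torch.allclose(100 / x, x.reciprocal() * 100.0)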
NotEqual
import torch


class NotEqual(torch.nn.Module):

    def __init__(self):
        super(NotEqual, self).__init__()

    def forward(self, x, y):
        return x != y


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_ne_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 != tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_ne_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class NotEqualNew(torch.nn.Module):

    def __init__(self):
        super(NotEqualNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
NotEqual
false
14,217
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TensorClampOptionMin
import torch


class TensorClampOptionMin(torch.nn.Module):

    def forward(self, x):
        return x.clamp(min=-0.1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -0.1
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TensorClampOptionMinNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
TensorClampOptionMin
false
14,218
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
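`x.clamp_min(-0.1)` (the TensorClampMin record earlier) and `x.clamp(min=-0.1)` above lower to the same triton_helpers.maximum kernel; only the fused-op name differs. A small eager sketch (my own, not from the dataset) of that equivalence:

# Sketch: the two clamp spellings are equivalent elementwise maximums.
import torch

x = torch.rand([4, 4, 4, 4]) * 2 - 1
assert torch.equal(x.clamp_min(-0.1), x.clamp(min=-0.1))
assert torch.equal(x.clamp_min(-0.1), torch.maximum(x, torch.tensor(-0.1)))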
RSubInt
import torch


class RSubInt(torch.nn.Module):

    def __init__(self):
        super(RSubInt, self).__init__()

    def forward(self, x):
        return 1 - x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RSubIntNew(torch.nn.Module):

    def __init__(self):
        super(RSubIntNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
RSubInt
false
14,219
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TorchAdd
import torch


class TorchAdd(torch.nn.Module):

    def __init__(self):
        super(TorchAdd, self).__init__()

    def forward(self, x, y):
        return torch.add(x, y)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class TorchAddNew(torch.nn.Module):

    def __init__(self):
        super(TorchAddNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
TorchAdd
false
14,220
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
Sub
import torch


class Sub(torch.nn.Module):

    def __init__(self):
        super(Sub, self).__init__()

    def forward(self, x, y):
        return x - y


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SubNew(torch.nn.Module):

    def __init__(self):
        super(SubNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
Sub
false
14,221
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TensorClampOptionMax
import torch


class TensorClampOptionMax(torch.nn.Module):

    def forward(self, x):
        return x.clamp(max=0.1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.1
    tmp2 = triton_helpers.minimum(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TensorClampOptionMaxNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
TensorClampOptionMax
false
14,222
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TorchMod
import torch


class TorchMod(torch.nn.Module):

    def __init__(self):
        super(TorchMod, self).__init__()

    def forward(self, x, y):
        return torch.fmod(x, y)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_fmod_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = libdevice.fmod(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_fmod_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class TorchModNew(torch.nn.Module):

    def __init__(self):
        super(TorchModNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
TorchMod
false
14,223
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
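`torch.fmod` maps onto libdevice.fmod, the C-style remainder whose sign follows the dividend; it is not the flooring remainder that Python's `%` computes on negative values. A hedged eager illustration (my own, not from the dataset):

# Sketch: fmod (truncated division) vs remainder (floored division).
import torch

a = torch.tensor([-3.5])
b = torch.tensor([2.0])
assert torch.fmod(a, b).item() == -1.5       # sign follows the dividend
assert torch.remainder(a, b).item() == 0.5   # sign follows the divisor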
RpowFloat
import torch


class RpowFloat(torch.nn.Module):

    def __init__(self):
        super(RpowFloat, self).__init__()

    def forward(self, x):
        return 2.0 ** x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.exp2(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RpowFloatNew(torch.nn.Module):

    def __init__(self):
        super(RpowFloatNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PogChamper/torch2trt
RpowFloat
false
14,224
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
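Because the base is exactly 2, Inductor rewrites `2.0 ** x` as a single exp2 call; the RpowInt record further down produces the identical kernel once the int base is promoted. A small eager sketch (my own, not from the dataset):

# Sketch: 2.0 ** x is lowered to exp2(x).
import torch

x = torch.rand([4, 4, 4, 4])
assert torch.allclose(2.0 ** x, torch.exp2(x))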
TorchSub
import torch


class TorchSub(torch.nn.Module):

    def __init__(self):
        super(TorchSub, self).__init__()

    def forward(self, x, y):
        return torch.sub(x, y)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sub_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class TorchSubNew(torch.nn.Module):

    def __init__(self):
        super(TorchSubNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
TorchSub
false
14,225
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TorchNotEqual
import torch


class TorchNotEqual(torch.nn.Module):

    def __init__(self):
        super(TorchNotEqual, self).__init__()

    def forward(self, x, y):
        return torch.ne(x, y)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_ne_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 != tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_ne_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class TorchNotEqualNew(torch.nn.Module):

    def __init__(self):
        super(TorchNotEqualNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
TorchNotEqual
false
14,226
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
TorchFloorDiv
import torch


class TorchFloorDiv(torch.nn.Module):

    def __init__(self):
        super(TorchFloorDiv, self).__init__()

    def forward(self, x, y):
        return torch.floor_divide(x, y)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_floor_divide_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 / tmp1
    tmp3 = libdevice.floor(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_floor_divide_0[grid(256)](arg1_1, arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class TorchFloorDivNew(torch.nn.Module):

    def __init__(self):
        super(TorchFloorDivNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PogChamper/torch2trt
TorchFloorDiv
false
14,227
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
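For the TorchFloorDiv record, the kernel lowers floor_divide to a true division followed by libdevice.floor. On current PyTorch (1.13+), torch.floor_divide performs floor division rather than truncation, so this lowering matches eager semantics even for negative operands; a small sketch:

import torch

# True flooring, not truncation: -7 // 2 is -4, exactly floor(x / y).
x = torch.tensor([7.0, -7.0, 7.0, -7.0])
y = torch.tensor([2.0, 2.0, -2.0, -2.0])
assert torch.equal(torch.floor_divide(x, y), torch.floor(x / y))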
RpowInt
import torch class RpowInt(torch.nn.Module): def __init__(self): super(RpowInt, self).__init__() def forward(self, x): return 2 ** x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.exp2(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RpowIntNew(torch.nn.Module): def __init__(self): super(RpowIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
RpowInt
false
14,228
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
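The RpowInt lowering above exploits the constant base: instead of a generic pow, 2 ** x becomes the exp2 hardware intrinsic. A sketch of the identity (for a general positive base a, a ** x == exp2(x * log2(a))):

import torch

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(2.0 ** x, torch.exp2(x))
assert torch.allclose(3.0 ** x, torch.exp2(x * torch.log2(torch.tensor(3.0))))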
SinLU
import torch class SinLU(torch.nn.Module): def __init__(self): super(SinLU, self).__init__() self.thr = torch.nn.Threshold(0, 0) def forward(self, x): return self.thr(x) - self.thr(-x).sin() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_neg_sin_sub_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tmp3 = tl.where(tmp2, tmp1, tmp0) tmp4 = -tmp0 tmp5 = tmp4 <= tmp1 tmp6 = tl.where(tmp5, tmp1, tmp4) tmp7 = tl_math.sin(tmp6) tmp8 = tmp3 - tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_neg_sin_sub_threshold_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SinLUNew(torch.nn.Module): def __init__(self): super(SinLUNew, self).__init__() self.thr = torch.nn.Threshold(0, 0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Praneethsv/prob_mbrl
SinLU
false
14,229
[ "MIT" ]
108
7b1adee6bff742b6f90e9b96ea243f12c9153b9b
https://github.com/Praneethsv/prob_mbrl/tree/7b1adee6bff742b6f90e9b96ea243f12c9153b9b
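In the SinLU record, nn.Threshold(0, 0) is exactly ReLU, so the single fused kernel computes relu(x) - sin(relu(-x)) with two tl.where selections and one sin. An eager sketch of the same expression:

import torch
import torch.nn.functional as F

thr = torch.nn.Threshold(0, 0)     # identical to F.relu
x = torch.randn(4, 4, 4, 4)
assert torch.allclose(thr(x) - thr(-x).sin(), F.relu(x) - torch.sin(F.relu(-x)))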
Exp
import torch class Exp(torch.nn.Module): def forward(self, x): return (-0.5 * x ** 2).exp() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = -0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_mul_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ExpNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Praneethsv/prob_mbrl
Exp
false
14,230
[ "MIT" ]
108
7b1adee6bff742b6f90e9b96ea243f12c9153b9b
https://github.com/Praneethsv/prob_mbrl/tree/7b1adee6bff742b6f90e9b96ea243f12c9153b9b
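The Exp record shows two small Inductor rewrites: x ** 2 is strength-reduced to x * x, and the constant -0.5 is applied after the square, giving the unnormalized Gaussian exp(-x^2 / 2):

import torch

x = torch.randn(4, 4, 4, 4)
assert torch.allclose((-0.5 * x ** 2).exp(), torch.exp(x * x * -0.5))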
up
import torch import torch.utils.data import torch.nn as nn import torch class up(nn.Module): def __init__(self, in_ch, bilinear=False): super(up, self).__init__() if bilinear: self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) else: self.up = nn.ConvTranspose2d(in_ch, in_ch, 2, stride=2) def forward(self, x): x = self.up(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn import torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class upNew(nn.Module): def __init__(self, in_ch, bilinear=False): super(upNew, self).__init__() if bilinear: self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) else: self.up = nn.ConvTranspose2d(in_ch, in_ch, 2, stride=2) def forward(self, input_0): primals_1 = self.up.weight primals_2 = self.up.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
PorscheTan/Siam-NestedUNet
up
false
14,231
[ "MIT" ]
122
a60d3d41f0114387c57dcc7cd2de3b6b0f259ad0
https://github.com/PorscheTan/Siam-NestedUNet/tree/a60d3d41f0114387c57dcc7cd2de3b6b0f259ad0
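For the up record, the transposed convolution itself stays an extern (cuDNN) kernel called with bias=None; only the bias add is Triton-generated. The output size follows (H_in - 1) * stride + kernel = (4 - 1) * 2 + 2 = 8, which is what the assert_size_stride(buf0, (4, 4, 8, 8), ...) guard encodes; an eager sketch:

import torch
import torch.nn as nn

# Output doubles from 4x4 to 8x8 with kernel 2, stride 2.
m = nn.ConvTranspose2d(4, 4, 2, stride=2)
assert m(torch.rand(4, 4, 4, 4)).shape == (4, 4, 8, 8)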
RDivFloat
import torch class RDivFloat(torch.nn.Module): def __init__(self): super(RDivFloat, self).__init__() def forward(self, x): return 100.0 / x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RDivFloatNew(torch.nn.Module): def __init__(self): super(RDivFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
RDivFloat
false
14,232
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
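In RDivFloat, Inductor canonicalizes the scalar-over-tensor division 100.0 / x into reciprocal(x) * 100.0 (hence the mul_reciprocal kernel name). The two forms agree up to rounding; a sketch:

import torch

x = torch.rand(4, 4, 4, 4) + 0.5   # keep inputs away from zero
assert torch.allclose(100.0 / x, x.reciprocal() * 100.0)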
CustomKLDivLoss
import torch from torch import Tensor from torch.nn import functional as F class CustomKLDivLoss(torch.nn.Module): def __init__(self, reduction='batchmean', log_target=False, apply_softmax=True) ->None: super(CustomKLDivLoss, self).__init__() self.reduction = reduction self.log_target = log_target self.apply_softmax = apply_softmax def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor: if self.apply_softmax: target = torch.softmax(target, dim=-1) return F.kl_div(torch.log_softmax(input, dim=-1), target, reduction =self.reduction, log_target=self.log_target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp36 = 0.25 tmp37 = tmp34 * tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](arg1_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del buf0 del buf2 return buf4, class CustomKLDivLossNew(torch.nn.Module): def __init__(self, reduction='batchmean', log_target=False, apply_softmax=True) ->None: super(CustomKLDivLossNew, self).__init__() self.reduction = reduction self.log_target = log_target self.apply_softmax = apply_softmax def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PiaCuk/KD_Lib
CustomKLDivLoss
false
14,233
[ "MIT" ]
360
153299d484e4c6b33793749709dbb0f33419f190
https://github.com/PiaCuk/KD_Lib/tree/153299d484e4c6b33793749709dbb0f33419f190
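Two details in the CustomKLDivLoss reduction kernel are worth spelling out: the isnan/where ladder implements the xlogy convention 0 * log 0 = 0 while still propagating NaN inputs, and the trailing 0.25 multiplier is the 'batchmean' reduction, 1 / batch_size with batch_size = 4. An eager sketch of the reduced quantity:

import torch
import torch.nn.functional as F

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
p, log_q = torch.softmax(y, dim=-1), torch.log_softmax(x, dim=-1)
ref = F.kl_div(log_q, p, reduction='batchmean')
# batchmean KL = sum(xlogy(p, p) - p * log_q) / batch_size
assert torch.allclose(ref, (torch.xlogy(p, p) - p * log_q).sum() / 4)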
AnyHead
import torch import torch.nn as nn import torch.utils.data class AnyHead(nn.Module): """AnyNet head: AvgPool, 1x1.""" def __init__(self, w_in, nc): super(AnyHead, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(w_in, nc, bias=True) def forward(self, x): x = self.avg_pool(x) x = x.view(x.size(0), -1) out = self.fc(x) return x, out @staticmethod def complexity(cx, w_in, nc): cx['h'], cx['w'] = 1, 1 cx = net.complexity_conv2d(cx, w_in, nc, 1, 1, 0, bias=True) return cx def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'w_in': 4, 'nc': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf2) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0) class AnyHeadNew(nn.Module): """AnyNet head: AvgPool, 1x1.""" def __init__(self, w_in, nc): super(AnyHeadNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(w_in, nc, bias=True) @staticmethod def complexity(cx, w_in, nc): cx['h'], cx['w'] = 1, 1 cx = net.complexity_conv2d(cx, w_in, nc, 1, 1, 0, bias=True) return cx def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
Pre-release/BAKE
AnyHead
false
14,234
[ "MIT" ]
67
2899b38d556a9151f55079c1b9888d462369aec8
https://github.com/Pre-release/BAKE/tree/2899b38d556a9151f55079c1b9888d462369aec8
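The AnyHead record reduces AdaptiveAvgPool2d((1, 1)) on a 4x4 map to a mean over 16 positions (the tmp4 / 16.0 in triton_per_fused_mean_0), then runs the Linear layer as a single addmm on the flattened features; an eager sketch:

import torch
import torch.nn as nn

pool, fc = nn.AdaptiveAvgPool2d((1, 1)), nn.Linear(4, 4)
x = torch.rand(4, 4, 4, 4)
feats = pool(x).view(4, -1)
assert torch.allclose(feats, x.mean(dim=(2, 3)))
assert torch.allclose(fc(feats), torch.addmm(fc.bias, feats, fc.weight.t()))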
Pow
import torch class Pow(torch.nn.Module): def __init__(self): super(Pow, self).__init__() def forward(self, x, y): return x ** y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.pow(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class PowNew(torch.nn.Module): def __init__(self): super(PowNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PogChamper/torch2trt
Pow
false
14,235
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
KLDivTeacherList
import torch import torch.nn as nn from torch.optim import * from torch.optim.lr_scheduler import * class KLDivTeacherList(nn.Module): def __init__(self): super(KLDivTeacherList, self).__init__() self.kl = torch.nn.KLDivLoss(reduction='batchmean') def forward(self, scores, labels): """ """ loss = self.kl(scores.softmax(-1), labels.softmax(-1)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_red_fused__softmax_div_mul_sub_sum_xlogy_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp17 / tmp24 tmp26 = tmp8 * tmp25 tmp27 = tmp16 - tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask, tmp30, _tmp29) tmp29 = tl.sum(_tmp29, 1)[:, None] tmp31 = 0.25 tmp32 = tmp29 * tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_red_fused__softmax_div_mul_sub_sum_xlogy_1[grid(1)](buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1 ) del buf0 del buf2 return buf4, class KLDivTeacherListNew(nn.Module): def __init__(self): super(KLDivTeacherListNew, self).__init__() self.kl = torch.nn.KLDivLoss(reduction='batchmean') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PranjaliJain/matchmaker
KLDivTeacherList
false
14,236
[ "Apache-2.0" ]
97
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
https://github.com/PranjaliJain/matchmaker/tree/b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
KLDivTeacherPointwise
import torch import torch.nn as nn from torch.optim import * from torch.optim.lr_scheduler import * class KLDivTeacherPointwise(nn.Module): def __init__(self): super(KLDivTeacherPointwise, self).__init__() self.kl = torch.nn.KLDivLoss() def forward(self, scores_pos, scores_neg, label_pos, label_neg): """ """ loss = self.kl(scores_pos, label_pos) + self.kl(scores_neg, label_neg) return loss / 2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr1 + r0, None) tmp15 = tl.load(in_ptr2 + r0, None) tmp22 = tl.load(in_ptr3 + r0, None) tmp1 = libdevice.isnan(tmp0).to(tl.int1) tmp2 = 0.0 tmp3 = tmp0 == tmp2 tmp4 = tl_math.log(tmp0) tmp5 = tmp0 * tmp4 tmp6 = tl.where(tmp3, tmp2, tmp5) tmp7 = float('nan') tmp8 = tl.where(tmp1, tmp7, tmp6) tmp10 = tmp0 * tmp9 tmp11 = tmp8 - tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp16 = libdevice.isnan(tmp15).to(tl.int1) tmp17 = tmp15 == tmp2 tmp18 = tl_math.log(tmp15) tmp19 = tmp15 * tmp18 tmp20 = tl.where(tmp17, tmp2, tmp19) tmp21 = tl.where(tmp16, tmp7, tmp20) tmp23 = tmp15 * tmp22 tmp24 = tmp21 - tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 256.0 tmp29 = tmp14 / tmp28 tmp30 = tmp27 / tmp28 tmp31 = tmp29 + tmp30 tmp32 = 0.5 tmp33 = tmp31 * tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mean_mul_sub_xlogy_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class KLDivTeacherPointwiseNew(nn.Module): def __init__(self): super(KLDivTeacherPointwiseNew, self).__init__() self.kl = torch.nn.KLDivLoss() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
PranjaliJain/matchmaker
KLDivTeacherPointwise
false
14,237
[ "Apache-2.0" ]
97
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
https://github.com/PranjaliJain/matchmaker/tree/b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
NTimesTanh
import torch import torch.nn as nn class NTimesTanh(nn.Module): def __init__(self, N): super(NTimesTanh, self).__init__() self.N = N self.tanh = nn.Tanh() def forward(self, x): return self.tanh(x) * self.N def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'N': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NTimesTanhNew(nn.Module): def __init__(self, N): super(NTimesTanhNew, self).__init__() self.N = N self.tanh = nn.Tanh() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Prinsphield/ELEGANT
NTimesTanh
false
14,238
[ "MIT" ]
276
26827e679cbef2074693ffb0d3f36426e481f7f5
https://github.com/Prinsphield/ELEGANT/tree/26827e679cbef2074693ffb0d3f36426e481f7f5
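Note how NTimesTanh's constructor argument is burned into the kernel: tmp2 = 4.0 is the literal N = 4, so this artifact is specialized to that value, and a module built with a different N would trigger a fresh compile under torch.compile's guards. The eager expression the specialized kernel must reproduce:

import torch

x = torch.randn(4, 4, 4, 4)
ref = torch.tanh(x) * 4.0          # what the N=4-specialized kernel computes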
DiffLoss
import torch import torch.utils.data import torch.optim import torch.utils.data.distributed class DiffLoss(torch.nn.Module): def __init__(self): super(DiffLoss, self).__init__() def forward(self, D1, D2): D1 = D1.view(D1.size(0), -1) D1_norm = torch.norm(D1, p=2, dim=1, keepdim=True).detach() D1_norm = D1.div(D1_norm.expand_as(D1) + 1e-06) D2 = D2.view(D2.size(0), -1) D2_norm = torch.norm(D2, p=2, dim=1, keepdim=True).detach() D2_norm = D2.div(D2_norm.expand_as(D2) + 1e-06) return torch.mean(D1_norm.mm(D2_norm.t()).pow(2)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = tmp0 / tmp8 tl.store(out_ptr1 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_per_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_0[grid(4)](arg0_1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32) triton_per_fused_add_div_linalg_vector_norm_0[grid(4)](arg1_1, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (64, 4), (1, 64), 0), out=buf4) del buf2 del buf3 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 triton_per_fused_mean_pow_1[grid(1)](buf6, buf4, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf4 return buf6, class DiffLossNew(torch.nn.Module): def __init__(self): super(DiffLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Prathyusha-Akundi/Adversarial-Continual-Learning
DiffLoss
false
14,239
[ "MIT" ]
237
edf4bbd2c4c61f1cc20818793702ef8c6cf4e0df
https://github.com/Prathyusha-Akundi/Adversarial-Continual-Learning/tree/edf4bbd2c4c61f1cc20818793702ef8c6cf4e0df
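The DiffLoss record fuses the per-row L2 norm, the 1e-6 stabilizer, and the divide into one kernel, leaves the Gram matrix to an extern mm, and finishes with a mean of squares, i.e. ||G||_F^2 / 16 for the 4x4 Gram matrix; an eager reproduction of the pipeline:

import torch

d1 = torch.rand(4, 4, 4, 4).view(4, -1)
d2 = torch.rand(4, 4, 4, 4).view(4, -1)
d1 = d1 / (d1.norm(p=2, dim=1, keepdim=True) + 1e-06)
d2 = d2 / (d2.norm(p=2, dim=1, keepdim=True) + 1e-06)
g = d1 @ d2.t()                    # 4x4 Gram matrix (extern mm in the graph)
assert torch.allclose(g.pow(2).mean(), g.pow(2).sum() / 16)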
SoftCrossEntropyLoss
import torch import torch.nn as nn import torch.utils.data class SoftCrossEntropyLoss(nn.Module): """SoftCrossEntropyLoss (useful for label smoothing and mixup). Identical to torch.nn.CrossEntropyLoss if used with one-hot labels.""" def __init__(self): super(SoftCrossEntropyLoss, self).__init__() def forward(self, x, y): loss = -y * torch.nn.functional.log_softmax(x, -1) return torch.sum(loss) / x.shape[0] def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp2 = tl.load(in_ptr1 + r2, None) tmp3 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp1 = -tmp0 tmp4 = tl_math.exp(tmp3) tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp14 = tl_math.log(tmp13) tmp15 = tmp2 - tmp14 tmp16 = tmp1 * tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 0.25 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class SoftCrossEntropyLossNew(nn.Module): """SoftCrossEntropyLoss (useful for label smoothing and mixup). Identical to torch.nn.CrossEntropyLoss if used with one-hot labels.""" def __init__(self): super(SoftCrossEntropyLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Pre-release/BAKE
SoftCrossEntropyLoss
false
14,240
[ "MIT" ]
67
2899b38d556a9151f55079c1b9888d462369aec8
https://github.com/Pre-release/BAKE/tree/2899b38d556a9151f55079c1b9888d462369aec8
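SoftCrossEntropyLoss shows Inductor's standard two-kernel log_softmax split: the first kernel stores the max-shifted logits for numerical stability, and the second folds the log-sum-exp into the same pass that accumulates -y * log_softmax(x) and applies the division by batch size (the 0.25). The underlying identity:

import torch

# log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x))))
x = torch.rand(4, 4, 4, 4)
shifted = x - x.max(dim=-1, keepdim=True).values
ref = shifted - shifted.exp().sum(dim=-1, keepdim=True).log()
assert torch.allclose(ref, torch.log_softmax(x, dim=-1), atol=1e-06)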
DiagGaussianActor
import torch import torch.nn as nn class DiagGaussianActor(nn.Module): """torch.distributions implementation of an diagonal Gaussian policy.""" def __init__(self, log_std_bounds=[-5, 2]): super().__init__() self.log_std_bounds = log_std_bounds def forward(self, mu, log_std): log_std = torch.tanh(log_std) log_std_min, log_std_max = self.log_std_bounds log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std + 1) std = log_std.exp() mu = torch.tanh(mu) return mu, std def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_exp_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 3.5 tmp5 = tmp3 * tmp4 tmp6 = -5.0 tmp7 = tmp5 + tmp6 tmp8 = tl_math.exp(tmp7) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_exp_mul_tanh_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, buf1 class DiagGaussianActorNew(nn.Module): """torch.distributions implementation of an diagonal Gaussian policy.""" def __init__(self, log_std_bounds=[-5, 2]): super().__init__() self.log_std_bounds = log_std_bounds def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
Purple-PI/rlstructures
DiagGaussianActor
false
14,241
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
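In DiagGaussianActor, the log-std bounds fold to compile-time constants: 0.5 * (2 - (-5)) = 3.5, so the kernel computes exp(3.5 * (tanh(log_std) + 1) - 5.0), while the mu branch compiles to a plain tanh kernel. A worked check of the folding:

import torch

log_std = torch.randn(4, 4, 4, 4)
t = torch.tanh(log_std)
lhs = torch.exp(-5.0 + 0.5 * (2.0 - (-5.0)) * (t + 1.0))   # original expression
rhs = torch.exp(3.5 * (t + 1.0) - 5.0)                     # constant-folded form
assert torch.allclose(lhs, rhs)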
LocalFeatureEncoder
import torch import torch.nn as nn from abc import ABCMeta from torch.utils import model_zoo class BaseModule(nn.Module, metaclass=ABCMeta): @classmethod def load(cls, config, state_dict=None): model = cls.from_cfg(config) if model is not None and state_dict is not None: model.load_state_dict(state_dict) return model @classmethod def from_cfg(cls, config): raise NotImplementedError @staticmethod def parse_pytorch_file(file_path): if urlparse(file_path).scheme in ('http', 'https'): state_dict = model_zoo.load_url(file_path, progress=True) else: assert osp.exists(file_path), f'File does not exist: {file_path}' None state_dict = torch.load(file_path, map_location=torch.device('cpu') ) return state_dict class LocalFeatureEncoder(BaseModule): def __init__(self, num_joints, z_dim, point_feature_len): super().__init__() self.num_joints = num_joints self.z_dim = z_dim self.point_feature_len = point_feature_len self.net = nn.Conv1d(in_channels=self.num_joints * self.z_dim, out_channels=self.num_joints * self.point_feature_len, kernel_size=(1,), groups=self.num_joints) def get_out_dim(self): return self.point_feature_len @classmethod def from_cfg(cls, config): return cls(num_joints=config['num_joints'], z_dim=config['z_dim'], point_feature_len=config['point_feature_len']) def forward(self, shape_code, structure_code, pose_code, lbs_weights): """ skinning_weights: B x T x K """ B, T, K = lbs_weights.shape assert K == self.num_joints global_feature = [] if shape_code is not None: global_feature.append(shape_code) if structure_code is not None: global_feature.append(structure_code) if pose_code is not None: global_feature.append(pose_code) global_feature = torch.cat(global_feature, dim=-1) global_feature = global_feature.unsqueeze(1).repeat(1, K, 1) global_feature = global_feature.view(B, -1, 1) local_feature = self.net(global_feature).view(B, K, -1) local_feature = local_feature.unsqueeze(1).repeat(1, T, 1, 1) local_feature = local_feature.view(B * T, K, -1) lbs_weights = lbs_weights.view(B * T, 1, K) local_feature = torch.bmm(lbs_weights, local_feature).view(B, T, -1) return local_feature def get_inputs(): return [torch.rand([4, 1]), torch.rand([4, 1]), torch.rand([4, 2]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_joints': 4, 'z_dim': 4, 'point_feature_len': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from abc import ABCMeta from torch.utils import model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x2, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + x2, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 4, tl.int64) tmp14 = tl.load(in_ptr2 + (2 * x2 + (-2 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (16, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_2, primals_3, primals_4, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 del primals_4 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 16, 1), (16, 1, 0), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf1, (4, 16, 1), (16, 1, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_repeat_1[grid(256)](buf1, primals_6, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf3 = reinterpret_tensor(buf1, (16, 1, 4), (4, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) del buf2 return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0 ), primals_5, reinterpret_tensor(buf0, (4, 16, 1), (16, 1, 1), 0 ), reinterpret_tensor(primals_1, (16, 4, 1), (4, 1, 4), 0) class BaseModule(nn.Module, metaclass=ABCMeta): @classmethod def load(cls, config, state_dict=None): model = cls.from_cfg(config) if model is not None and state_dict is not None: model.load_state_dict(state_dict) return model @classmethod def from_cfg(cls, config): raise NotImplementedError @staticmethod def parse_pytorch_file(file_path): if urlparse(file_path).scheme in ('http', 'https'): state_dict = model_zoo.load_url(file_path, progress=True) else: assert osp.exists(file_path), f'File does not exist: {file_path}' None state_dict = torch.load(file_path, map_location=torch.device('cpu') ) return state_dict class LocalFeatureEncoderNew(BaseModule): def __init__(self, num_joints, z_dim, point_feature_len): super().__init__() self.num_joints = num_joints self.z_dim = z_dim self.point_feature_len = point_feature_len self.net = nn.Conv1d(in_channels=self.num_joints * self.z_dim, out_channels=self.num_joints * self.point_feature_len, kernel_size=(1,), groups=self.num_joints) def get_out_dim(self): return self.point_feature_len @classmethod def from_cfg(cls, config): return cls(num_joints=config['num_joints'], z_dim=config['z_dim'], point_feature_len=config['point_feature_len']) def forward(self, input_0, input_1, input_2, input_3): primals_5 = self.net.weight primals_6 = self.net.bias primals_2 = input_0 primals_3 = input_1 primals_4 = input_2 primals_1 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Pooya448/leap
LocalFeatureEncoder
false
14,242
[ "BSD-3-Clause" ]
55
b0562baaaad1d4c0bcd514e020185c32a86faf23
https://github.com/Pooya448/leap/tree/b0562baaaad1d4c0bcd514e020185c32a86faf23
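A shape walk-through clarifies the LocalFeatureEncoder graph: the (4,1), (4,1) and (4,2) codes concatenate to a (4, 4) global feature, which is tiled per joint (K = 4) into (4, 16, 1); the groups=4 Conv1d then acts as an independent 4 -> 4 linear map per joint, and the bmm blends joints by the skinning weights. An eager sketch with the same shapes:

import torch
import torch.nn as nn

B, T, K, z = 4, 4, 4, 4
g = torch.rand(B, z).unsqueeze(1).repeat(1, K, 1).view(B, K * z, 1)
local = nn.Conv1d(K * z, K * 4, 1, groups=K)(g).view(B, K, 4)
w = torch.rand(B, T, K).view(B * T, 1, K)                # skinning weights
out = torch.bmm(w, local.unsqueeze(1).repeat(1, T, 1, 1).view(B * T, K, 4))
assert out.view(B, T, -1).shape == (4, 4, 4)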
SoftDiceLoss
import torch import numpy as np from torch import nn def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=True): inp = inp.sum(int(ax)) return inp def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) or shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x) if net_output.device.type == 'cuda': y_onehot = y_onehot y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tp = sum_tensor(tp, axes, keepdim=False) fp = sum_tensor(fp, axes, keepdim=False) fn = sum_tensor(fn, axes, keepdim=False) return tp, fp, fn class SoftDiceLoss(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0, square=False): super(SoftDiceLoss, self).__init__() self.square = square self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square) dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth) if not self.do_bg: if self.batch_dice: dc = dc[1:] else: dc = dc[:, 1:] dc = dc.mean() return -dc def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp1 tmp5 = tmp0 * tmp4 tmp6 = tmp3 - tmp0 tmp7 = tmp6 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) tl.store(out_ptr2 + x0, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_rsub_0[grid(256)](arg0_1, arg1_1, buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1, buf2 def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=True): inp = inp.sum(int(ax)) return inp def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) or shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x) if net_output.device.type == 'cuda': y_onehot = y_onehot y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tp = sum_tensor(tp, axes, keepdim=False) fp = sum_tensor(fp, axes, keepdim=False) fn = sum_tensor(fn, axes, keepdim=False) return tp, fp, fn class SoftDiceLossNew(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0, square=False): super(SoftDiceLossNew, self).__init__() self.square = square self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Project-SwaG/igvc-software
SoftDiceLoss
false
14,243
[ "MIT" ]
100
cfe5ad5ae06199030544560af7e4ebf732cd3004
https://github.com/Project-SwaG/igvc-software/tree/cfe5ad5ae06199030544560af7e4ebf732cd3004
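The fused kernel in the SoftDiceLoss record emits all three elementwise products in a single pass over the inputs: tp = p * g, fp = p * (1 - g), fn = (1 - p) * g, which satisfy tp + fp = p and tp + fn = g before sum_tensor reduces over the spatial axes; a sketch:

import torch

p, g = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
tp, fp, fn = p * g, p * (1 - g), (1 - p) * g
assert torch.allclose(tp + fp, p) and torch.allclose(tp + fn, g)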
KDLoss
import torch import torch.nn as nn import torch.utils.data class KDLoss(nn.Module): def __init__(self, temp_factor): super(KDLoss, self).__init__() self.temp_factor = temp_factor self.kl_div = nn.KLDivLoss(reduction='sum') def forward(self, input, target): log_p = torch.log_softmax(input / self.temp_factor, dim=1) loss = self.kl_div(log_p, target) * self.temp_factor ** 2 / input.size( 0) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'temp_factor': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_sub_sum_xlogy_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp9 = tl.load(in_ptr1 + r3, None) tmp10 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp1 = libdevice.isnan(tmp0).to(tl.int1) tmp2 = 0.0 tmp3 = tmp0 == tmp2 tmp4 = tl_math.log(tmp0) tmp5 = tmp0 * tmp4 tmp6 = tl.where(tmp3, tmp2, tmp5) tmp7 = float('nan') tmp8 = tl.where(tmp1, tmp7, tmp6) tmp11 = tl_math.exp(tmp10) tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tl_math.log(tmp20) tmp22 = tmp9 - tmp21 tmp23 = tmp0 * tmp22 tmp24 = tmp8 - tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 16.0 tmp29 = tmp27 * tmp28 tmp30 = 0.25 tmp31 = tmp29 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_sub_sum_xlogy_1[grid(1)](buf2, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class KDLossNew(nn.Module): def __init__(self, temp_factor): super(KDLossNew, self).__init__() self.temp_factor = temp_factor self.kl_div = nn.KLDivLoss(reduction='sum') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Pre-release/BAKE
KDLoss
false
14,244
[ "MIT" ]
67
2899b38d556a9151f55079c1b9888d462369aec8
https://github.com/Pre-release/BAKE/tree/2899b38d556a9151f55079c1b9888d462369aec8
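A short usage sketch for this record, assuming the eager KDLoss class above is in scope; the soft target below is illustrative (in practice a temperature-softened teacher distribution):

import torch

kd = KDLoss(temp_factor=4)
student_logits = torch.rand(4, 4, 4, 4)
# nn.KLDivLoss expects the target as probabilities, so normalise over dim=1.
soft_targets = torch.softmax(torch.rand(4, 4, 4, 4) / 4, dim=1)
loss = kd(student_logits, soft_targets)  # kl_div(log_softmax(x/T), target) * T**2 / batch_size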
RAddFloat
import torch class RAddFloat(torch.nn.Module): def __init__(self): super(RAddFloat, self).__init__() def forward(self, x): return 1.0 + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RAddFloatNew(torch.nn.Module): def __init__(self): super(RAddFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
RAddFloat
false
14,245
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
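The eager/compiled pairs in these records can be cross-checked numerically; a minimal sketch for this record, assuming both classes above are in scope and a CUDA device is available:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = RAddFloat()(x)       # plain 1.0 + x
fused = RAddFloatNew()(x)    # runs triton_poi_fused_add_0
assert torch.allclose(eager, fused)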
DQMLP
import torch import torch.nn as nn class DQMLP(nn.Module): def __init__(self, n_observations, n_actions, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear_adv = nn.Linear(n_hidden, n_actions) self.linear_value = nn.Linear(n_hidden, 1) self.n_actions = n_actions def forward_common(self, frame): z = torch.tanh(self.linear(frame)) return z def forward_value(self, z): return self.linear_value(z) def forward_advantage(self, z): adv = self.linear_adv(z) advm = adv.mean(1).unsqueeze(-1).repeat(1, self.n_actions) return adv - advm def forward(self, state): z = self.forward_common(state) v = self.forward_value(z) adv = self.forward_advantage(z) return v + adv def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_observations': 4, 'n_actions': 4, 'n_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_repeat_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp5 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp3 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 1), (1, 4 ), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_repeat_sub_1[grid(16)](buf2, primals_5, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 del buf3 del primals_5 return buf4, primals_3, buf1, primals_6, primals_4 class DQMLPNew(nn.Module): def __init__(self, n_observations, n_actions, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear_adv = nn.Linear(n_hidden, n_actions) self.linear_value = nn.Linear(n_hidden, 1) self.n_actions = n_actions def forward_common(self, frame): z = torch.tanh(self.linear(frame)) return z def forward_value(self, z): return self.linear_value(z) def forward_advantage(self, z): adv = self.linear_adv(z) advm = adv.mean(1).unsqueeze(-1).repeat(1, self.n_actions) return adv - advm def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = self.linear_adv.weight primals_7 = self.linear_adv.bias primals_4 = self.linear_value.weight primals_5 = self.linear_value.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Purple-PI/rlstructures
DQMLP
false
14,246
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
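The second fused kernel in this record implements the dueling aggregation from the reference forward(); an eager restatement with illustrative tensors:

import torch

v = torch.rand(4, 1)    # value head output, V(s)
adv = torch.rand(4, 4)  # advantage head output, A(s, a)
q = v + (adv - adv.mean(dim=1, keepdim=True))  # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))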
BaselineModel
import torch import torch.nn as nn class BaselineModel(nn.Module): """The model that computes V(s)""" def __init__(self, n_observations, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, 1) def forward(self, frame): z = torch.tanh(self.linear(frame)) critic = self.linear2(z) return critic def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_observations': 4, 'n_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4 class BaselineModelNew(nn.Module): """The model that computes V(s)""" def __init__(self, n_observations, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, 1) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Purple-PI/rlstructures
BaselineModel
false
14,247
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
ActorNetwork
import torch import torch.nn as nn import torch.nn.functional as F class ActorNetwork(nn.Module): def __init__(self, input_shape, output_shape, **kwargs): super(ActorNetwork, self).__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h = nn.Linear(n_input, n_output) nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain ('relu')) def forward(self, state): return F.relu(self._h(torch.squeeze(state, 1).float())) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4], 'output_shape': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2 class ActorNetworkNew(nn.Module): def __init__(self, input_shape, output_shape, **kwargs): super(ActorNetworkNew, self).__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h = nn.Linear(n_input, n_output) nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain ('relu')) def forward(self, input_0): primals_2 = self._h.weight primals_3 = self._h.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
PuzeLiu/mushroom-rl
ActorNetwork
false
14,248
[ "MIT" ]
344
99942b425e66b4ddcc26009d7105dde23841e95d
https://github.com/PuzeLiu/mushroom-rl/tree/99942b425e66b4ddcc26009d7105dde23841e95d
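Note that triton_poi_fused_relu_threshold_backward_0 above writes two buffers: the ReLU activation and the boolean mask that threshold_backward uses in the backward pass. Eagerly, with illustrative shapes:

import torch

pre = torch.rand(64, 4)   # linear output with bias added
out = torch.relu(pre)     # corresponds to buf1
mask = out <= 0.0         # corresponds to buf2, saved for autograd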
CriticNetwork
import torch import torch.nn as nn import torch.nn.functional as F class CriticNetwork(nn.Module): def __init__(self, input_shape, output_shape, **kwargs): super().__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h = nn.Linear(n_input, n_output) nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain ('relu')) def forward(self, state, action): state_action = torch.cat((state.float(), action.float()), dim=1) q = F.relu(self._h(state_action)) return torch.squeeze(q) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4], 'output_shape': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_relu_squeeze_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) triton_poi_fused_relu_squeeze_threshold_backward_1[grid(512)](buf1, primals_4, buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_4 return buf2, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf3 class CriticNetworkNew(nn.Module): def __init__(self, input_shape, output_shape, **kwargs): super().__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h = nn.Linear(n_input, n_output) nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain ('relu')) def forward(self, input_0, input_1): primals_3 = self._h.weight primals_4 = self._h.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
PuzeLiu/mushroom-rl
CriticNetwork
false
14,249
[ "MIT" ]
344
99942b425e66b4ddcc26009d7105dde23841e95d
https://github.com/PuzeLiu/mushroom-rl/tree/99942b425e66b4ddcc26009d7105dde23841e95d
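The first kernel in this record (triton_poi_fused_cat_0) only concatenates state and action along dim=1; an eager restatement with the record's test shapes:

import torch

state = torch.rand(4, 4, 4, 4)
action = torch.rand(4, 4, 4, 4)
state_action = torch.cat((state, action), dim=1)  # (4, 8, 4, 4), i.e. buf0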
Normalization
import torch import torch.nn as nn from torch.nn.parameter import Parameter class Normalization(nn.Module): def __init__(self): super(Normalization, self).__init__() self.alpha = Parameter(torch.ones(1)) self.beta = Parameter(torch.zeros(1)) def forward(self, x): x = torch.nn.functional.normalize(x, dim=1) return x * self.alpha + self.beta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr2 + 0) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp15 * tmp17 tmp21 = tmp18 + tmp20 tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class NormalizationNew(nn.Module): def __init__(self): super(NormalizationNew, self).__init__() self.alpha = Parameter(torch.ones(1)) self.beta = Parameter(torch.zeros(1)) def forward(self, input_0): primals_2 = self.alpha primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Prinsphield/ELEGANT
Normalization
false
14,250
[ "MIT" ]
276
26827e679cbef2074693ffb0d3f36426e481f7f5
https://github.com/Prinsphield/ELEGANT/tree/26827e679cbef2074693ffb0d3f36426e481f7f5
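What this record's single fused kernel computes, written out eagerly; F.normalize clamps the L2 norm at 1e-12 just as the kernel does with triton_helpers.maximum:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
alpha, beta = torch.ones(1), torch.zeros(1)
out = F.normalize(x, dim=1) * alpha + beta  # channel-wise L2 normalisation, then affine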
Network
import torch import torch.nn as nn class Network(nn.Module): def __init__(self, input_shape, output_shape, n_features, **kwargs): super(Network, self).__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h1 = nn.Linear(n_input, n_features) self._h2 = nn.Linear(n_features, n_features) self._h3 = nn.Linear(n_features, n_output) nn.init.xavier_uniform_(self._h1.weight, gain=nn.init. calculate_gain('tanh')) nn.init.xavier_uniform_(self._h2.weight, gain=nn.init. calculate_gain('tanh')) nn.init.xavier_uniform_(self._h3.weight, gain=nn.init. calculate_gain('linear')) def forward(self, state, **kwargs): features1 = torch.tanh(self._h1(torch.squeeze(state, -1).float())) features2 = torch.tanh(self._h2(features1)) a = self._h3(features2) return a def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4], 'output_shape': [4, 4], 'n_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class NetworkNew(nn.Module): def __init__(self, input_shape, output_shape, n_features, **kwargs): super(NetworkNew, self).__init__() n_input = input_shape[-1] n_output = output_shape[0] self._h1 = nn.Linear(n_input, n_features) self._h2 = nn.Linear(n_features, n_features) self._h3 = nn.Linear(n_features, n_output) nn.init.xavier_uniform_(self._h1.weight, gain=nn.init. calculate_gain('tanh')) nn.init.xavier_uniform_(self._h2.weight, gain=nn.init. calculate_gain('tanh')) nn.init.xavier_uniform_(self._h3.weight, gain=nn.init. calculate_gain('linear')) def forward(self, input_0): primals_2 = self._h1.weight primals_3 = self._h1.bias primals_4 = self._h2.weight primals_5 = self._h2.bias primals_6 = self._h3.weight primals_7 = self._h3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
PuzeLiu/mushroom-rl
Network
false
14,251
[ "MIT" ]
344
99942b425e66b4ddcc26009d7105dde23841e95d
https://github.com/PuzeLiu/mushroom-rl/tree/99942b425e66b4ddcc26009d7105dde23841e95d
DIAYNBaselineModel
import torch import torch.nn as nn class DIAYNBaselineModel(nn.Module): """The model that computes V(s)""" def __init__(self, n_observations, n_hidden, n_policies): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, n_policies) self.n_policies = n_policies def forward(self, frame, idx_policy): z = torch.tanh(self.linear(frame)) critic = self.linear2(z) critic = critic.reshape(critic.size()[0], self.n_policies, 1) critic = critic[torch.arange(critic.size()[0]), idx_policy] return critic def get_inputs(): return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'n_observations': 4, 'n_hidden': 4, 'n_policies': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_arange_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_arange_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused_index_2[grid(4)](primals_6, buf2, buf4, 4, XBLOCK= 4, num_warps=1, num_stages=1) del buf2 return buf4, primals_3, primals_6, buf1, buf3, primals_4 class DIAYNBaselineModelNew(nn.Module): """The model that computes V(s)""" def __init__(self, n_observations, n_hidden, n_policies): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, n_policies) self.n_policies = n_policies def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = self.linear2.weight primals_5 = self.linear2.bias primals_4 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Purple-PI/rlstructures
DIAYNBaselineModel
false
14,252
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
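The final index kernel in this record selects one critic value per batch row; an eager restatement with the record's test inputs:

import torch

critic = torch.rand(4, 4)                      # (batch, n_policies) after linear2
idx_policy = torch.ones(4, dtype=torch.int64)
selected = critic[torch.arange(4), idx_policy].unsqueeze(-1)  # (4, 1), i.e. buf4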
SoftAttention
import torch import torch.utils.data import torch.nn as nn class SoftAttention(torch.nn.Module): """ v = tanh(hW + b) w = softmax(v*u) out = sum wh see eqs 5-7 in https://www.sciencedirect.com/science/article/abs/pii/S0924271619300115 """ def __init__(self, hidden_dim): super(SoftAttention, self).__init__() self.linear = nn.Linear(in_features=hidden_dim, out_features= hidden_dim, bias=True) self.tanh = nn.Tanh() self.ua = nn.Parameter(torch.Tensor(hidden_dim)) torch.nn.init.normal_(self.ua, mean=0.0, std=0.1) self.softmax = nn.Softmax(dim=1) def forward(self, x): N, *_ = x.shape va = self.tanh(self.linear(x)) batchwise_ua = self.ua.repeat(N, 1) omega = self.softmax(torch.bmm(va, batchwise_ua.unsqueeze(-1))) rnn_feat = torch.bmm(x.transpose(1, 2), omega).squeeze(-1) return rnn_feat def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_1[grid(16)](primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 0), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf5, out=buf6) return reinterpret_tensor(buf6, (4, 4), (4, 1), 0 ), primals_1, buf1, buf5, reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0) class SoftAttentionNew(torch.nn.Module): """ v = tanh(hW + b) w = softmax(v*u) out = sum wh see eqs 5-7 in https://www.sciencedirect.com/science/article/abs/pii/S0924271619300115 """ def __init__(self, hidden_dim): super(SoftAttentionNew, self).__init__() self.linear = nn.Linear(in_features=hidden_dim, out_features= hidden_dim, bias=True) self.tanh = nn.Tanh() self.ua = nn.Parameter(torch.Tensor(hidden_dim)) torch.nn.init.normal_(self.ua, mean=0.0, std=0.1) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_3 = self.ua primals_2 = self.linear.weight primals_4 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Pratyush1991/crop-type-mapping
SoftAttention
false
14,253
[ "MIT" ]
94
d9d99ec92c3a090ec5576f9e46c89dfcc6f50cf3
https://github.com/Pratyush1991/crop-type-mapping/tree/d9d99ec92c3a090ec5576f9e46c89dfcc6f50cf3
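A usage sketch for this record, assuming the eager SoftAttention class above is in scope; the two softmax kernels compute attention weights over the time dimension, which then weight the input sequence:

import torch

att = SoftAttention(hidden_dim=4)
x = torch.rand(4, 4, 4)   # (N, T, hidden_dim)
rnn_feat = att(x)         # (N, hidden_dim), an attention-weighted sum over T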
AgentModel
import torch import torch.nn as nn class AgentModel(nn.Module): """The model that computes one score per action""" def __init__(self, n_observations, n_actions, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, n_actions) def forward(self, frame): z = torch.tanh(self.linear(frame)) score_actions = self.linear2(z) probabilities_actions = torch.softmax(score_actions, dim=-1) return probabilities_actions def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_observations': 4, 'n_actions': 4, 'n_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf4, primals_4 class AgentModelNew(nn.Module): """The model that computes one score per action""" def __init__(self, n_observations, n_actions, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, n_actions) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Purple-PI/rlstructures
AgentModel
false
14,254
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
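A quick eager check for this record, assuming the AgentModel class above is in scope: the two softmax kernels normalise the action scores, so the output sums to one along the last dimension:

import torch

model = AgentModel(n_observations=4, n_actions=4, n_hidden=4)
probs = model(torch.rand(4, 4, 4, 4))
assert torch.allclose(probs.sum(dim=-1), torch.ones(4, 4, 4))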
ScaledDotProductAttention
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data.distributed class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -1000000000.0) attn = F.softmax(attn, dim=-1) output = torch.matmul(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1 ) del arg1_1 buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3 class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
QiuhongAnnaWei/IBRNet
ScaledDotProductAttention
false
14,255
[ "Apache-2.0" ]
254
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
https://github.com/QiuhongAnnaWei/IBRNet/tree/6c8b68e6d95eae04535ff0906387ec7899f5d5ce
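An eager restatement of this record's compiled path (divide by temperature, softmax over keys, weight the values); temperature=4 matches get_init_inputs():

import torch

q, k, v = (torch.rand(4, 4, 4, 4) for _ in range(3))
attn = torch.softmax(torch.matmul(q / 4.0, k.transpose(2, 3)), dim=-1)  # output[1]
out = torch.matmul(attn, v)                                             # output[0]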
InfoNCE
import torch import torch.nn.functional as F from torch import nn def normalize(*xs): return [(None if x is None else F.normalize(x, dim=-1)) for x in xs] def transpose(x): return x.transpose(-2, -1) def info_nce(query, positive_key, negative_keys=None, temperature=0.1, reduction='mean', negative_mode='unpaired'): if query.dim() != 2: raise ValueError('<query> must have 2 dimensions.') if positive_key.dim() != 2: raise ValueError('<positive_key> must have 2 dimensions.') if negative_keys is not None: if negative_mode == 'unpaired' and negative_keys.dim() != 2: raise ValueError( "<negative_keys> must have 2 dimensions if <negative_mode> == 'unpaired'." ) if negative_mode == 'paired' and negative_keys.dim() != 3: raise ValueError( "<negative_keys> must have 3 dimensions if <negative_mode> == 'paired'." ) if len(query) != len(positive_key): raise ValueError( '<query> and <positive_key> must have the same number of samples.' ) if negative_keys is not None: if negative_mode == 'paired' and len(query) != len(negative_keys): raise ValueError( "If negative_mode == 'paired', then <negative_keys> must have the same number of samples as <query>." ) if query.shape[-1] != positive_key.shape[-1]: raise ValueError( 'Vectors of <query> and <positive_key> should have the same number of components.' ) if negative_keys is not None: if query.shape[-1] != negative_keys.shape[-1]: raise ValueError( 'Vectors of <query> and <negative_keys> should have the same number of components.' ) query, positive_key, negative_keys = normalize(query, positive_key, negative_keys) if negative_keys is not None: positive_logit = torch.sum(query * positive_key, dim=1, keepdim=True) if negative_mode == 'unpaired': negative_logits = query @ transpose(negative_keys) elif negative_mode == 'paired': query = query.unsqueeze(1) negative_logits = query @ transpose(negative_keys) negative_logits = negative_logits.squeeze(1) logits = torch.cat([positive_logit, negative_logits], dim=1) labels = torch.zeros(len(logits), dtype=torch.long, device=query.device ) else: logits = query @ transpose(positive_key) labels = torch.arange(len(query), device=query.device) return F.cross_entropy(logits / temperature, labels, reduction=reduction) class InfoNCE(nn.Module): """ Calculates the InfoNCE loss for self-supervised learning. This contrastive loss enforces the embeddings of similar (positive) samples to be close and those of different (negative) samples to be distant. A query embedding is compared with one positive key and with one or more negative keys. References: https://arxiv.org/abs/1807.03748v2 https://arxiv.org/abs/2010.05113 Args: temperature: Logits are divided by temperature before calculating the cross entropy. reduction: Reduction method applied to the output. Value must be one of ['none', 'sum', 'mean']. See torch.nn.functional.cross_entropy for more details about each option. negative_mode: Determines how the (optional) negative_keys are handled. Value must be one of ['paired', 'unpaired']. If 'paired', then each query sample is paired with a number of negative keys. Comparable to a triplet loss, but with multiple negatives per sample. If 'unpaired', then the set of negative keys are all unrelated to any positive key. Input shape: query: (N, D) Tensor with query samples (e.g. embeddings of the input). positive_key: (N, D) Tensor with positive samples (e.g. embeddings of augmented input). negative_keys (optional): Tensor with negative samples (e.g. embeddings of other inputs) If negative_mode = 'paired', then negative_keys is a (N, M, D) Tensor. If negative_mode = 'unpaired', then negative_keys is a (M, D) Tensor. If None, then the negative keys for a sample are the positive keys for the other samples. Returns: Value of the InfoNCE Loss. Examples: >>> loss = InfoNCE() >>> batch_size, num_negative, embedding_size = 32, 48, 128 >>> query = torch.randn(batch_size, embedding_size) >>> positive_key = torch.randn(batch_size, embedding_size) >>> negative_keys = torch.randn(num_negative, embedding_size) >>> output = loss(query, positive_key, negative_keys) """ def __init__(self, temperature=0.1, reduction='mean', negative_mode= 'unpaired'): super().__init__() self.temperature = temperature self.reduction = reduction self.negative_mode = negative_mode def forward(self, query, positive_key, negative_keys=None): return info_nce(query, positive_key, negative_keys, temperature= self.temperature, reduction=self.reduction, negative_mode=self. negative_mode) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 10.0 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_per_fused_arange_nll_loss_forward_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp6 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp0 = r0 tmp1 = tl.full([1, 1], -100, tl.int64) tmp2 = tmp0 != tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp0, tmp3) tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = tl_math.exp(tmp6) tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = tmp5 - tmp17 tmp19 = -tmp18 tmp20 = 0.0 tmp21 = tl.where(tmp2, tmp19, tmp20) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, 
RBLOCK])
    tmp24 = tl.sum(tmp22, 1)[:, None]
    tmp25 = tmp2.to(tl.int64)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp28 = tl.sum(tmp26, 1)[:, None]
    tmp29 = tmp28.to(tl.float32)
    tmp30 = tmp24 / tmp29
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
            out=buf2)
        del buf0
        buf3 = buf1
        del buf1
        triton_poi_fused_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf2
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf4
        del buf4
        triton_per_fused_arange_nll_loss_forward_2[grid(1)](buf6, buf3, 1,
            4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf3
    return buf6,


def normalize(*xs):
    return [(None if x is None else F.normalize(x, dim=-1)) for x in xs]


def transpose(x):
    return x.transpose(-2, -1)


def info_nce(query, positive_key, negative_keys=None, temperature=0.1,
    reduction='mean', negative_mode='unpaired'):
    if query.dim() != 2:
        raise ValueError('<query> must have 2 dimensions.')
    if positive_key.dim() != 2:
        raise ValueError('<positive_key> must have 2 dimensions.')
    if negative_keys is not None:
        if negative_mode == 'unpaired' and negative_keys.dim() != 2:
            raise ValueError(
                "<negative_keys> must have 2 dimensions if <negative_mode> == 'unpaired'."
                )
        if negative_mode == 'paired' and negative_keys.dim() != 3:
            raise ValueError(
                "<negative_keys> must have 3 dimensions if <negative_mode> == 'paired'."
                )
    if len(query) != len(positive_key):
        raise ValueError(
            '<query> and <positive_key> must have the same number of samples.'
            )
    if negative_keys is not None:
        if negative_mode == 'paired' and len(query) != len(negative_keys):
            raise ValueError(
                "If negative_mode == 'paired', then <negative_keys> must have the same number of samples as <query>."
                )
    if query.shape[-1] != positive_key.shape[-1]:
        raise ValueError(
            'Vectors of <query> and <positive_key> should have the same number of components.'
            )
    if negative_keys is not None:
        if query.shape[-1] != negative_keys.shape[-1]:
            raise ValueError(
                'Vectors of <query> and <negative_keys> should have the same number of components.'
                )
    query, positive_key, negative_keys = normalize(query, positive_key,
        negative_keys)
    if negative_keys is not None:
        positive_logit = torch.sum(query * positive_key, dim=1, keepdim=True)
        if negative_mode == 'unpaired':
            negative_logits = query @ transpose(negative_keys)
        elif negative_mode == 'paired':
            query = query.unsqueeze(1)
            negative_logits = query @ transpose(negative_keys)
            negative_logits = negative_logits.squeeze(1)
        logits = torch.cat([positive_logit, negative_logits], dim=1)
        labels = torch.zeros(len(logits), dtype=torch.long, device=query.device
            )
    else:
        logits = query @ transpose(positive_key)
        labels = torch.arange(len(query), device=query.device)
    return F.cross_entropy(logits / temperature, labels, reduction=reduction)


class InfoNCENew(nn.Module):
    """
    Calculates the InfoNCE loss for self-supervised learning.
This contrastive loss enforces the embeddings of similar (positive) samples to be close and those of different (negative) samples to be distant. A query embedding is compared with one positive key and with one or more negative keys. References: https://arxiv.org/abs/1807.03748v2 https://arxiv.org/abs/2010.05113 Args: temperature: Logits are divided by temperature before calculating the cross entropy. reduction: Reduction method applied to the output. Value must be one of ['none', 'sum', 'mean']. See torch.nn.functional.cross_entropy for more details about each option. negative_mode: Determines how the (optional) negative_keys are handled. Value must be one of ['paired', 'unpaired']. If 'paired', then each query sample is paired with a number of negative keys. Comparable to a triplet loss, but with multiple negatives per sample. If 'unpaired', then the set of negative keys are all unrelated to any positive key. Input shape: query: (N, D) Tensor with query samples (e.g. embeddings of the input). positive_key: (N, D) Tensor with positive samples (e.g. embeddings of augmented input). negative_keys (optional): Tensor with negative samples (e.g. embeddings of other inputs) If negative_mode = 'paired', then negative_keys is a (N, M, D) Tensor. If negative_mode = 'unpaired', then negative_keys is a (M, D) Tensor. If None, then the negative keys for a sample are the positive keys for the other samples. Returns: Value of the InfoNCE Loss. Examples: >>> loss = InfoNCE() >>> batch_size, num_negative, embedding_size = 32, 48, 128 >>> query = torch.randn(batch_size, embedding_size) >>> positive_key = torch.randn(batch_size, embedding_size) >>> negative_keys = torch.randn(num_negative, embedding_size) >>> output = loss(query, positive_key, negative_keys) """ def __init__(self, temperature=0.1, reduction='mean', negative_mode= 'unpaired'): super().__init__() self.temperature = temperature self.reduction = reduction self.negative_mode = negative_mode def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
RElbers/info-nce-pytorch
InfoNCE
false
14,256
[ "MIT" ]
59
37ceef781b3fb89557c0d2b401a9fadf74be8791
https://github.com/RElbers/info-nce-pytorch/tree/37ceef781b3fb89557c0d2b401a9fadf74be8791
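For the path this entry actually compiles (no negative keys), the three fused kernels reduce to: row-wise L2 normalization of both inputs (triton_poi_fused_div_0, with the same 1e-12 clamp F.normalize uses), a cosine-similarity matrix via the extern mm, a max-subtract-and-scale pass where the hard-coded 10.0 in triton_poi_fused_1 is 1/temperature for the default temperature=0.1, and a fused log-sum-exp NLL against arange labels. A minimal eager sketch of the same computation, assuming a CUDA device and that `call` from the generated code above is in scope:

import torch
import torch.nn.functional as F

def reference_info_nce(query, positive_key, temperature=0.1):
    q = F.normalize(query, dim=-1)   # mirrors triton_poi_fused_div_0
    p = F.normalize(positive_key, dim=-1)
    logits = q @ p.t()               # the extern_kernels.mm step
    labels = torch.arange(len(q), device=q.device)
    return F.cross_entropy(logits / temperature, labels)

if torch.cuda.is_available():
    q = torch.rand(4, 4, device='cuda')
    p = torch.rand(4, 4, device='cuda')
    expected = reference_info_nce(q, p)
    actual, = call([q.clone(), p.clone()])
    assert torch.allclose(expected, actual, atol=1e-5)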
SACQ
import torch import torch.nn as nn class SACQ(nn.Module): def __init__(self, n_observations, action_dim, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear_2 = nn.Linear(action_dim, n_hidden) self.linear_q = nn.Linear(n_hidden * 2, n_hidden) self.linear_qq = nn.Linear(n_hidden, 1) def forward(self, frame, action): zf = torch.relu(self.linear(frame)) za = torch.relu(self.linear_2(action)) q = torch.relu(self.linear_q(torch.cat([zf, za], dim=1))) q = self.linear_qq(q) return q def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_observations': 4, 'action_dim': 4, 'n_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr3 + (-4 + x0), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](buf0, primals_2, buf1, primals_5, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_7, (8, 4), (1, 8 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_10 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(16)](buf1, primals_5, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del primals_5 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(16)](buf0, primals_2, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_2 return (buf6, primals_3, primals_6, buf2, buf4, primals_9, primals_7, buf7, buf8) class SACQNew(nn.Module): def __init__(self, n_observations, action_dim, n_hidden): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear_2 = nn.Linear(action_dim, n_hidden) self.linear_q = nn.Linear(n_hidden * 2, n_hidden) self.linear_qq = nn.Linear(n_hidden, 1) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = self.linear_2.weight primals_5 = self.linear_2.bias primals_7 = self.linear_q.weight primals_8 = self.linear_q.bias primals_9 = self.linear_qq.weight primals_10 = self.linear_qq.bias primals_4 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
Purple-PI/rlstructures
SACQ
false
14,257
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
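A detail worth noting in this entry: triton_poi_fused_cat_0 writes relu(frame @ W1.T + b1) and relu(action @ W2.T + b2) directly into the two halves of the 4x8 concatenated buffer, so torch.cat never runs as a separate copy, and the two relu_threshold_backward kernels only record boolean masks for autograd. A small illustrative sketch of that fusion (buf0 and buf1 stand for the bias-free matmul outputs, as in call above):

import torch

def fused_cat_reference(buf0, bias_f, buf1, bias_a):
    zf = torch.relu(buf0 + bias_f)     # first half of the cat buffer
    za = torch.relu(buf1 + bias_a)     # second half
    return torch.cat([zf, za], dim=1)  # the kernel emits this layout in one pass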
conv
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data.distributed class conv(nn.Module): def __init__(self, num_in_layers, num_out_layers, kernel_size, stride): super(conv, self).__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(num_in_layers, num_out_layers, kernel_size= kernel_size, stride=stride, padding=(self.kernel_size - 1) // 2, padding_mode='reflect') self.bn = nn.InstanceNorm2d(num_out_layers, track_running_stats= False, affine=True) def forward(self, x): return F.elu(self.bn(self.conv(x)), inplace=True) def get_inputs(): return [torch.rand([4, 1, 4, 4])] def get_init_inputs(): return [[], {'num_in_layers': 1, 'num_out_layers': 1, 'kernel_size': 4, 'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_elu_repeat_1( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1]) tmp2 = tl.load(in_out_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp30 = tl.load(in_ptr2 + 0) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(rmask & xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 9, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(rmask & xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 9.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp1 tmp32 = tmp29 + tmp31 tmp33 = 0.0 tmp34 = tmp32 > tmp33 tmp35 = 1.0 tmp36 = tmp32 * tmp35 tmp37 = libdevice.expm1(tmp36) tmp38 = tmp37 * tmp35 tmp39 = tl.where(tmp34, tmp36, tmp38) tl.store(out_ptr0 + x0, tmp1, xmask) tl.store(in_out_ptr0 + (r1 + 9 * x0), tmp5, rmask & xmask) tl.store(in_out_ptr1 + (r1 + 9 * x0), tmp39, rmask & xmask) tl.store(out_ptr3 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(144)](primals_3, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 3, 3), (9, 9, 3, 1)) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = buf1 del buf1 buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf8 = empty_strided_cuda((1, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf9 = reinterpret_tensor(buf8, (4, 1, 3, 3), (9, 1, 3, 1), 0) del buf8 buf7 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_elu_repeat_1[grid (4)](buf2, buf9, primals_4, primals_2, primals_5, buf3, buf4, buf7, 4, 9, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_4 del primals_5 return reinterpret_tensor(buf9, (4, 1, 3, 3), (9, 9, 3, 1), 0 ), primals_1, buf0, buf2, buf3, reinterpret_tensor(buf7, (4,), (1,), 0 ), reinterpret_tensor(buf9, (4, 1, 3, 3), (9, 9, 3, 1), 0 ), reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 1, 1), 0) class convNew(nn.Module): def __init__(self, num_in_layers, num_out_layers, kernel_size, stride): super(convNew, self).__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(num_in_layers, num_out_layers, kernel_size= kernel_size, stride=stride, padding=(self.kernel_size - 1) // 2, padding_mode='reflect') self.bn = nn.InstanceNorm2d(num_out_layers, track_running_stats= False, affine=True) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.bn.weight primals_5 = self.bn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
QiuhongAnnaWei/IBRNet
conv
false
14,258
[ "Apache-2.0" ]
254
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
https://github.com/QiuhongAnnaWei/IBRNet/tree/6c8b68e6d95eae04535ff0906387ec7899f5d5ce
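The index expression in triton_poi_fused_reflection_pad2d_0 is a closed form for the one-pixel reflection pad of a 4x4 map: with a = |-3 + |x0 - 1|| and b = |-3 + |x1 - 1||, the flat offset 15 - a - 4*b selects row 3 - b and column 3 - a of the source tile. A short sketch checking that mapping against F.pad, using the shapes from get_inputs:

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 4, 4)
padded = F.pad(x, (1, 1, 1, 1), mode='reflect')  # what the kernel materializes

idx = torch.arange(6)
src = 3 - ((idx - 1).abs() - 3).abs()            # reflected indices 1,0,1,2,3,2
manual = x[:, :, src][:, :, :, src]
assert torch.equal(padded, manual)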
TokenMixer
import torch import torch.nn.functional as F from torch import nn class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class TokenMixer(nn.Module): def __init__(self, d_model, seq_len, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(seq_len, expansion_factor, dropout) def forward(self, x): residual = x x = self.norm(x) x = x.transpose(1, 2) x = self.mlp(x) x = x.transpose(1, 2) out = x + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'seq_len': 4, 'expansion_factor': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x5 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask) @triton.jit def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_add_gelu_2[grid(1024)](buf3, primals_5, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 16, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0 ), primals_6, primals_4 class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class TokenMixerNew(nn.Module): def __init__(self, d_model, seq_len, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(seq_len, expansion_factor, dropout) def forward(self, input_0): primals_2 = self.norm.weight primals_3 = self.norm.bias primals_4 = self.mlp.fc1.weight primals_5 = self.mlp.fc1.bias primals_6 = self.mlp.fc2.weight primals_7 = self.mlp.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
RAYTRAC3R/mlp-singer
TokenMixer
false
14,259
[ "MIT" ]
82
a68299b943815353fcc177e4873d24d1d0937cfb
https://github.com/RAYTRAC3R/mlp-singer/tree/a68299b943815353fcc177e4873d24d1d0937cfb
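Neither transpose(1, 2) in TokenMixer.forward survives compilation: triton_poi_fused_clone_1 applies the LayerNorm affine while writing with the swapped stride pattern (x0 + 4 * x2 + 16 * x1 + 64 * x3), and the second transpose is only a reinterpret_tensor over buf5. Since call() also contains no dropout kernels, the generated module should only match the eager one in eval mode; a hedged equivalence check under that assumption, on a CUDA device:

import torch

if torch.cuda.is_available():
    eager = TokenMixer(d_model=4, seq_len=4, expansion_factor=4, dropout=0.5)
    fused = TokenMixerNew(4, 4, 4, 0.5)
    fused.load_state_dict(eager.state_dict())
    eager, fused = eager.cuda().eval(), fused.cuda().eval()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), fused(x), atol=1e-5)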
DIAYNActionModel
import torch import torch.nn as nn class DIAYNActionModel(nn.Module): """The model that computes one score per action""" def __init__(self, n_observations, n_actions, n_hidden, n_policies): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, n_actions * n_policies) self.n_policies = n_policies self.n_actions = n_actions def forward(self, frame, idx_policy): z = torch.tanh(self.linear(frame)) score_actions = self.linear2(z) s = score_actions.size() score_actions = score_actions.reshape(s[0], self.n_policies, self. n_actions) score_actions = score_actions[torch.arange(s[0]), idx_policy] probabilities_actions = torch.softmax(score_actions, dim=-1) return probabilities_actions def get_inputs(): return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'n_observations': 4, 'n_actions': 4, 'n_hidden': 4, 'n_policies': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_arange_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_index_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (4 * tmp4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * tmp4 + 16 * x0), xmask, eviction_policy='evict_last') tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.load(in_ptr1 + (2 + 4 * tmp4 + 16 * x0), xmask, eviction_policy='evict_last') tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = tl.load(in_ptr1 + (3 + 4 * tmp4 + 16 * x0), xmask, eviction_policy='evict_last') tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp6 - tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = tmp7 - tmp12 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tmp9 - tmp12 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp11 - tmp12 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tl.store(out_ptr0 + x0, tmp12, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused__softmax_index_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4 + 16 * x1), xmask) tmp8 = tmp6 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 
1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_arange_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_index_2[grid(4)](primals_6, buf2, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_index_3[grid(16)](primals_6, buf2, buf4, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 del buf4 del buf5 return buf6, primals_3, primals_6, buf1, buf3, buf6, primals_4 class DIAYNActionModelNew(nn.Module): """The model that computes one score per action""" def __init__(self, n_observations, n_actions, n_hidden, n_policies): super().__init__() self.linear = nn.Linear(n_observations, n_hidden) self.linear2 = nn.Linear(n_hidden, n_actions * n_policies) self.n_policies = n_policies self.n_actions = n_actions def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Purple-PI/rlstructures
DIAYNActionModel
false
14,260
[ "MIT" ]
281
9b201b083715bbda2f3534b010c84e11dfc0a1c7
https://github.com/Purple-PI/rlstructures/tree/9b201b083715bbda2f3534b010c84e11dfc0a1c7
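The pair triton_poi_fused__softmax_index_2/_3 fuses the advanced indexing with the softmax: the first kernel loads only the four scores of the selected policy per sample (tmp4 is the wrap-around-corrected index) and reduces their max and sum of exponentials; the second normalizes. The full (batch, n_policies, n_actions) softmax is never formed. An eager restatement of what those two kernels compute, with an illustrative helper name:

import torch

def indexed_softmax_reference(score_actions, idx_policy, n_policies=4,
        n_actions=4):
    s = score_actions.reshape(-1, n_policies, n_actions)
    picked = s[torch.arange(s.size(0), device=s.device), idx_policy]
    return torch.softmax(picked, dim=-1)  # max-subtract, exp, normalize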
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data.distributed class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x += residual x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 
1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf2, primals_1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del buf4 del primals_7 return buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
QiuhongAnnaWei/IBRNet
PositionwiseFeedForward
false
14,261
[ "Apache-2.0" ]
254
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
https://github.com/QiuhongAnnaWei/IBRNet/tree/6c8b68e6d95eae04535ff0906387ec7899f5d5ce
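The residual add never gets its own kernel here: both layer-norm passes read buf2 and primals_1 separately and redo the x + residual sum on the fly, once while accumulating mean and variance and once while producing the affine output. A compact eager sketch of that fused tail (h2 stands for the second linear's output before the add; the helper name is illustrative):

import torch

def ffn_tail_reference(x, h2, gamma, beta, eps=1e-06):
    y = h2 + x  # recomputed inside both norm kernels rather than stored
    mu = y.mean(-1, keepdim=True)
    var = y.var(-1, unbiased=False, keepdim=True)
    return (y - mu) * torch.rsqrt(var + eps) * gamma + beta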
SoftQNetwork
import torch import torch.nn.functional as F import torch.nn as nn class SoftQNetwork(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size=[400, 300], init_w=0.003): super(SoftQNetwork, self).__init__() self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size[0]) self.linear2 = nn.Linear(hidden_size[0], hidden_size[1]) self.linear3 = nn.Linear(hidden_size[1], 1) self.ln1 = nn.LayerNorm(hidden_size[0]) self.ln2 = nn.LayerNorm(hidden_size[1]) self.linear3.weight.data.uniform_(-init_w, init_w) self.linear3.bias.data.uniform_(-init_w, init_w) def forward(self, state, action): x = torch.cat([state, action], 1) x = self.ln1(F.relu(self.linear1(x))) x = self.ln2(F.relu(self.linear2(x))) x = self.linear3(x) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_per_fused_native_layer_norm_relu_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 400 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 400 * x0), rmask, other=0.0) tmp26 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tl.where(rmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [RBLOCK]) tmp8 = tl.where(rmask, tmp6, 0) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp10 = tl.full([1], 400, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = tl.where(rmask, tmp15, 0) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp19 = 400.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp2 - tmp12 tmp25 = tmp24 * tmp23 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp23, None) tl.store(out_ptr1 + (r1 + 400 * x0), tmp29, rmask) tl.store(out_ptr0 + x0, tmp12, None) @triton.jit def triton_per_fused_native_layer_norm_relu_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 300 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 300 * x0), rmask, other=0.0) tmp26 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tl.where(rmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [RBLOCK]) tmp8 = tl.where(rmask, tmp6, 0) tmp9 = 
triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp10 = tl.full([1], 300, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = tl.where(rmask, tmp15, 0) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp19 = 300.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp2 - tmp12 tmp25 = tmp24 * tmp23 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp23, None) tl.store(out_ptr1 + (r1 + 300 * x0), tmp29, rmask) tl.store(out_ptr0 + x0, tmp12, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (400,), (1,)) assert_size_stride(primals_6, (400,), (1,)) assert_size_stride(primals_7, (300, 400), (400, 1)) assert_size_stride(primals_8, (300,), (1,)) assert_size_stride(primals_9, (300,), (1,)) assert_size_stride(primals_10, (300,), (1,)) assert_size_stride(primals_11, (1, 300), (300, 1)) assert_size_stride(primals_12, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 400), (400, 1), torch.float32) triton_per_fused_native_layer_norm_relu_1[grid(4)](buf5, buf1, primals_5, primals_6, buf2, buf6, 4, 400, num_warps=4, num_stages=1 ) del primals_6 buf7 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7, (400, 300), (1, 400), 0), alpha=1, beta=1, out=buf7) del primals_8 buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = reinterpret_tensor(buf9, (4, 1), (1, 1), 0) del buf9 buf12 = empty_strided_cuda((4, 300), (300, 1), torch.float32) triton_per_fused_native_layer_norm_relu_2[grid(4)](buf11, buf7, primals_9, primals_10, buf8, buf12, 4, 300, num_warps=4, num_stages=1) del primals_10 buf14 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_12, buf12, reinterpret_tensor( primals_11, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14) del primals_12 return (buf14, primals_5, primals_9, buf0, buf1, buf2, buf5, buf6, buf7, buf8, buf11, buf12, primals_11, primals_7) class SoftQNetworkNew(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size=[400, 300], init_w=0.003): super(SoftQNetworkNew, self).__init__() self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size[0]) self.linear2 = nn.Linear(hidden_size[0], hidden_size[1]) self.linear3 = nn.Linear(hidden_size[1], 1) self.ln1 = nn.LayerNorm(hidden_size[0]) self.ln2 = nn.LayerNorm(hidden_size[1]) 
self.linear3.weight.data.uniform_(-init_w, init_w) self.linear3.bias.data.uniform_(-init_w, init_w) def forward(self, input_0, input_1): primals_3 = self.linear1.weight primals_4 = self.linear1.bias primals_7 = self.linear2.weight primals_8 = self.linear2.bias primals_11 = self.linear3.weight primals_12 = self.linear3.bias primals_5 = self.ln1.weight primals_6 = self.ln1.bias primals_9 = self.ln2.weight primals_10 = self.ln2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
QasimWani/CityLearn
SoftQNetwork
false
14,262
[ "MIT" ]
202
ffc0584508dc9c796c97e6b908b75380b9bc6606
https://github.com/QasimWani/CityLearn/tree/ffc0584508dc9c796c97e6b908b75380b9bc6606
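Both triton_per_fused_native_layer_norm_relu kernels are persistent row reductions: one program per sample (XBLOCK = 1) keeps all 400 or 300 hidden features resident (RBLOCK = 512) and performs the ReLU, mean, biased variance, rsqrt, and affine in a single pass, so each ln(relu(linear(x))) stage costs one kernel after its addmm. A sketch of that fused row operation (helper name is illustrative):

import torch

def ln_relu_reference(z, weight, bias, eps=1e-05):
    h = torch.relu(z)  # fused into the same reduction pass
    mu = h.mean(-1, keepdim=True)
    var = h.var(-1, unbiased=False, keepdim=True)
    return (h - mu) * torch.rsqrt(var + eps) * weight + bias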
upconv
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data.distributed class conv(nn.Module): def __init__(self, num_in_layers, num_out_layers, kernel_size, stride): super(conv, self).__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(num_in_layers, num_out_layers, kernel_size= kernel_size, stride=stride, padding=(self.kernel_size - 1) // 2, padding_mode='reflect') self.bn = nn.InstanceNorm2d(num_out_layers, track_running_stats= False, affine=True) def forward(self, x): return F.elu(self.bn(self.conv(x)), inplace=True) class upconv(nn.Module): def __init__(self, num_in_layers, num_out_layers, kernel_size, scale): super(upconv, self).__init__() self.scale = scale self.conv = conv(num_in_layers, num_out_layers, kernel_size, 1) def forward(self, x): x = nn.functional.interpolate(x, scale_factor=self.scale, align_corners=True, mode='bilinear') return self.conv(x) def get_inputs(): return [torch.rand([4, 1, 4, 4])] def get_init_inputs(): return [[], {'num_in_layers': 1, 'num_out_layers': 1, 'kernel_size': 4, 'scale': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = triton_helpers.minimum(tmp23, tmp2) tmp25 = tmp20 * tmp24 tmp26 = tmp16 + tmp25 tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp29 = tmp28 - tmp27 tmp30 = tmp29 * tmp24 tmp31 = tmp27 + tmp30 tmp32 = tmp26 - tmp31 tmp33 = tmp6.to(tl.float32) tmp34 = tmp5 - tmp33 tmp35 = triton_helpers.maximum(tmp34, tmp4) tmp36 = triton_helpers.minimum(tmp35, tmp2) tmp37 = tmp32 * tmp36 tmp38 = tmp31 + tmp37 tl.store(in_out_ptr0 + x4, tmp38, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_elu_repeat_2( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1]) tmp2 = tl.load(in_out_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp30 = tl.load(in_ptr2 + 0) tmp31 = tl.broadcast_to(tmp30, 
[XBLOCK, RBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(rmask & xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 9, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(rmask & xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 9.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp1 tmp32 = tmp29 + tmp31 tmp33 = 0.0 tmp34 = tmp32 > tmp33 tmp35 = 1.0 tmp36 = tmp32 * tmp35 tmp37 = libdevice.expm1(tmp36) tmp38 = tmp37 * tmp35 tmp39 = tl.where(tmp34, tmp36, tmp38) tl.store(out_ptr0 + x0, tmp1, xmask) tl.store(in_out_ptr0 + (r1 + 9 * x0), tmp5, rmask & xmask) tl.store(in_out_ptr1 + (r1 + 9 * x0), tmp39, rmask & xmask) tl.store(out_ptr3 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (64)](buf1, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32) triton_poi_fused_reflection_pad2d_1[grid(144)](buf1, buf2, 144, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 3, 3), (9, 9, 3, 1)) buf5 = empty_strided_cuda((4,), (1,), torch.float32) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf10 = empty_strided_cuda((1, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf11 = reinterpret_tensor(buf10, (4, 1, 3, 3), (9, 1, 3, 1), 0) del buf10 buf9 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_elu_repeat_2[grid (4)](buf4, buf11, primals_4, primals_3, primals_5, buf5, buf6, buf9, 4, 9, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 del primals_4 del primals_5 return reinterpret_tensor(buf11, (4, 1, 3, 3), (9, 9, 3, 1), 0 ), primals_2, buf2, buf4, buf5, reinterpret_tensor(buf9, (4,), (1,), 0 ), reinterpret_tensor(buf11, (4, 1, 3, 3), (9, 9, 3, 1), 0 ), reinterpret_tensor(buf6, (1, 4, 1, 1), (4, 1, 1, 1), 0) class conv(nn.Module): def __init__(self, num_in_layers, num_out_layers, kernel_size, stride): super(conv, self).__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(num_in_layers, num_out_layers, kernel_size= kernel_size, stride=stride, padding=(self.kernel_size - 1) // 2, padding_mode='reflect') self.bn = nn.InstanceNorm2d(num_out_layers, track_running_stats= False, affine=True) def forward(self, x): return F.elu(self.bn(self.conv(x)), inplace=True) class upconvNew(nn.Module): def __init__(self, num_in_layers, num_out_layers, kernel_size, scale): super(upconvNew, 
self).__init__() self.scale = scale self.conv = conv(num_in_layers, num_out_layers, kernel_size, 1) def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_4 = self.conv.bn.weight primals_5 = self.conv.bn.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
QiuhongAnnaWei/IBRNet
upconv
false
14,263
[ "Apache-2.0" ]
254
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
https://github.com/QiuhongAnnaWei/IBRNet/tree/6c8b68e6d95eae04535ff0906387ec7899f5d5ce
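With scale = 1.0 and align_corners=True the bilinear resample is an identity, yet the trace still emits the full gather-and-lerp kernel (triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0): the neighbor taps are still loaded, but both interpolation weights (tmp24 and tmp36) clamp to zero. A quick sketch of that degenerate case:

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 4, 4)
y = F.interpolate(x, scale_factor=1.0, align_corners=True, mode='bilinear')
assert torch.allclose(x, y)  # scale 1.0 with align_corners is a no-op resample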
ChamferLoss
import torch import torch.nn as nn def cd_loss(preds, gts): def batch_pairwise_dist(x, y): _bs, num_points_x, _points_dim = x.size() _, num_points_y, _ = y.size() xx = torch.bmm(x, x.transpose(2, 1)) yy = torch.bmm(y, y.transpose(2, 1)) zz = torch.bmm(x, y.transpose(2, 1)) diag_ind_x = torch.arange(0, num_points_x) diag_ind_y = torch.arange(0, num_points_y) rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz. transpose(2, 1)) ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz) P = rx.transpose(2, 1) + ry - 2 * zz return P P = batch_pairwise_dist(gts, preds) mins, _ = torch.min(P, 1) loss_1 = torch.sum(mins) mins, _ = torch.min(P, 2) loss_2 = torch.sum(mins) return loss_1 + loss_2 class ChamferLoss(nn.Module): def __init__(self): super().__init__() def forward(self, preds, gts, **kwargs): return cd_loss(preds, gts) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_min_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex // 4
    r0 = rindex % 4
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + 16 * r1, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (5 * r0 + 16 * r1), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (r0 + 16 * r1), None)
    tmp7 = tl.load(in_ptr0 + (5 + 16 * r1), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + r0 + 16 * r1), None)
    tmp13 = tl.load(in_ptr0 + (10 + 16 * r1), None, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr2 + (8 + r0 + 16 * r1), None)
    tmp19 = tl.load(in_ptr0 + (15 + 16 * r1), None, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr2 + (12 + r0 + 16 * r1), None)
    tmp28 = tl.load(in_ptr0 + (5 * r0 + 16 * r1), None, eviction_policy=
        'evict_last')
    tmp29 = tl.load(in_ptr1 + 16 * r1, None, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr2 + 4 * r2, None, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr1 + (5 + 16 * r1), None, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr2 + (1 + 4 * r2), None, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr1 + (10 + 16 * r1), None, eviction_policy=
        'evict_last')
    tmp42 = tl.load(in_ptr2 + (2 + 4 * r2), None, eviction_policy='evict_last')
    tmp46 = tl.load(in_ptr1 + (15 + 16 * r1), None, eviction_policy=
        'evict_last')
    tmp48 = tl.load(in_ptr2 + (3 + 4 * r2), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = 2.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 - tmp5
    tmp8 = tmp7 + tmp1
    tmp10 = tmp9 * tmp4
    tmp11 = tmp8 - tmp10
    tmp12 = triton_helpers.minimum(tmp6, tmp11)
    tmp14 = tmp13 + tmp1
    tmp16 = tmp15 * tmp4
    tmp17 = tmp14 - tmp16
    tmp18 = triton_helpers.minimum(tmp12, tmp17)
    tmp20 = tmp19 + tmp1
    tmp22 = tmp21 * tmp4
    tmp23 = tmp20 - tmp22
    tmp24 = triton_helpers.minimum(tmp18, tmp23)
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp30 = tmp28 + tmp29
    tmp32 = tmp31 * tmp4
    tmp33 = tmp30 - tmp32
    tmp35 = tmp28 + tmp34
    tmp37 = tmp36 * tmp4
    tmp38 = tmp35 - tmp37
    tmp39 = triton_helpers.minimum(tmp33, tmp38)
    tmp41 = tmp28 + tmp40
    tmp43 = tmp42 * tmp4
    tmp44 = tmp41 - tmp43
    tmp45 = triton_helpers.minimum(tmp39, tmp44)
    tmp47 = tmp28 + tmp46
    tmp49 = tmp48 * tmp4
    tmp50 = tmp47 - tmp49
    tmp51 = triton_helpers.minimum(tmp45, tmp50)
    tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK])
    tmp54 = tl.sum(tmp52, 1)[:, None]
    tmp55 = tmp27 + tmp54
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp55, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg1_1, (4, 4, 4), (
            16, 1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg0_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
            16, 1, 4), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
            16, 1, 4), 0), out=buf2)
        del arg0_1
        del arg1_1
        buf5 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf5
        del buf5
        get_raw_stream(0)
        triton_per_fused_add_min_mul_sub_sum_0[grid(1)](buf7, buf0, buf1,
            buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf7,


def cd_loss(preds, gts):

    def batch_pairwise_dist(x, y):
        _bs, num_points_x, _points_dim = x.size()
        _, num_points_y, _ = y.size()
        xx = torch.bmm(x, x.transpose(2, 1))
        yy = torch.bmm(y, y.transpose(2, 1))
        zz = torch.bmm(x, y.transpose(2, 1))
        diag_ind_x = torch.arange(0, num_points_x)
        diag_ind_y = torch.arange(0, num_points_y)
        rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.
            transpose(2, 1))
        ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
        P = rx.transpose(2, 1) + ry - 2 * zz
        return P
    P = batch_pairwise_dist(gts, preds)
    mins, _ = torch.min(P, 1)
    loss_1 = torch.sum(mins)
    mins, _ = torch.min(P, 2)
    loss_2 = torch.sum(mins)
    return loss_1 + loss_2


class ChamferLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
RRemixx/DMRDenoise
ChamferLoss
false
14,264
[ "MIT" ]
79
026d25f9eaf98fdfd85a67caeb9b49cab71148e9
https://github.com/RRemixx/DMRDenoise/tree/026d25f9eaf98fdfd85a67caeb9b49cab71148e9
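The bmm-based distance matrix above relies on the expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2<x_i, y_j>, so it can be cross-checked against torch.cdist. A minimal CPU-side sketch, assuming the cd_loss definition above is in scope (shapes chosen arbitrarily):

import torch

torch.manual_seed(0)
preds = torch.rand(2, 6, 3)
gts = torch.rand(2, 5, 3)

# P[b, i, j] = ||gts[b, i] - preds[b, j]||^2, built from three bmm calls
P_ref = torch.cdist(gts, preds) ** 2
ref = P_ref.min(dim=1).values.sum() + P_ref.min(dim=2).values.sum()
assert torch.allclose(cd_loss(preds, gts), ref, atol=1e-4)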
ChannelMixer
import torch
import torch.nn.functional as F
from torch import nn


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout1(F.gelu(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        return x


class ChannelMixer(nn.Module):

    def __init__(self, d_model, expansion_factor, dropout):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.mlp = FeedForward(d_model, expansion_factor, dropout)

    def forward(self, x):
        residual = x
        x = self.norm(x)
        x = self.mlp(x)
        out = x + residual
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'expansion_factor': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) 
assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_gelu_2[grid(1024)](buf3, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0 ), primals_6, primals_4 class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class ChannelMixerNew(nn.Module): def __init__(self, d_model, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(d_model, expansion_factor, dropout) def forward(self, input_0): primals_2 = self.norm.weight primals_3 = self.norm.bias primals_4 = self.mlp.fc1.weight primals_5 = self.mlp.fc1.bias primals_6 = self.mlp.fc2.weight primals_7 = self.mlp.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
RAYTRAC3R/mlp-singer
ChannelMixer
false
14,265
[ "MIT" ]
82
a68299b943815353fcc177e4873d24d1d0937cfb
https://github.com/RAYTRAC3R/mlp-singer/tree/a68299b943815353fcc177e4873d24d1d0937cfb
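A hypothetical parity check between the eager ChannelMixer and the Inductor build above: dropout (p=0.5) has to be disabled with eval(), because the generated kernels contain no dropout, and call() requires CUDA device 0.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    ref = ChannelMixer(d_model=4, expansion_factor=4, dropout=0.5).cuda().eval()
    opt = ChannelMixerNew(d_model=4, expansion_factor=4, dropout=0.5).cuda().eval()
    opt.load_state_dict(ref.state_dict())  # identical submodule layout
    x = torch.rand([4, 4, 4, 4], device='cuda')
    with torch.no_grad():
        assert torch.allclose(ref(x), opt(x), atol=1e-5)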
ConformerEncoderLayer
import torch import torch.nn as nn from torch.optim import * from torch.optim.lr_scheduler import * import torch.nn.functional as F def multi_head_sep_attention_forward(query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None, need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None, v_proj_weight=None, static_k=None, static_v=None, shared_qk=False, sep= False): if not torch.jit.is_scripting(): pass tgt_len, bsz, embed_dim = query.size() assert embed_dim == embed_dim_to_check assert key.size() == value.size() head_dim = embed_dim // num_heads assert head_dim * num_heads == embed_dim, 'embed_dim must be divisible by num_heads' scaling = float(head_dim) ** -0.5 if not use_separate_proj_weight: if torch.equal(query, key) and torch.equal(key, value): if shared_qk: k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk( 2, dim=-1) q = k else: q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk( 3, dim=-1) elif torch.equal(key, value): _b = in_proj_bias _start = 0 _end = embed_dim _w = in_proj_weight[_start:_end, :] if _b is not None: _b = _b[_start:_end] q = F.linear(query, _w, _b) if key is None: assert value is None k = None v = None else: _b = in_proj_bias _start = embed_dim _end = None _w = in_proj_weight[_start:, :] if _b is not None: _b = _b[_start:] k, v = F.linear(key, _w, _b).chunk(2, dim=-1) else: _b = in_proj_bias _start = 0 _end = embed_dim _w = in_proj_weight[_start:_end, :] if _b is not None: _b = _b[_start:_end] q = F.linear(query, _w, _b) _b = in_proj_bias _start = embed_dim _end = embed_dim * 2 _w = in_proj_weight[_start:_end, :] if _b is not None: _b = _b[_start:_end] k = F.linear(key, _w, _b) _b = in_proj_bias _start = embed_dim * 2 _end = None _w = in_proj_weight[_start:, :] if _b is not None: _b = _b[_start:] v = F.linear(value, _w, _b) else: q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) len1, len2 = q_proj_weight_non_opt.size() assert len1 == embed_dim and len2 == query.size(-1) k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) len1, len2 = k_proj_weight_non_opt.size() assert len1 == embed_dim and len2 == key.size(-1) v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) len1, len2 = v_proj_weight_non_opt.size() assert len1 == embed_dim and len2 == value.size(-1) if in_proj_bias is not None: q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0: embed_dim]) k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim :embed_dim * 2]) v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[ embed_dim * 2:]) else: q = F.linear(query, q_proj_weight_non_opt, in_proj_bias) k = F.linear(key, k_proj_weight_non_opt, in_proj_bias) v = F.linear(value, v_proj_weight_non_opt, in_proj_bias) q = q * scaling if attn_mask is not None: if attn_mask.dim() == 2: attn_mask = attn_mask.unsqueeze(0) if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: raise RuntimeError( 'The size of the 2D attn_mask is not correct.') elif attn_mask.dim() == 3: if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: raise RuntimeError( 'The size of the 3D attn_mask is not correct.') else: raise RuntimeError("attn_mask's dimension {} is not supported". 
format(attn_mask.dim())) if bias_k is not None and bias_v is not None: if static_k is None and static_v is None: k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = pad(attn_mask, (0, 1)) if key_padding_mask is not None: key_padding_mask = pad(key_padding_mask, (0, 1)) else: assert static_k is None, 'bias cannot be added to static key.' assert static_v is None, 'bias cannot be added to static value.' else: assert bias_k is None assert bias_v is None q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) if static_k is not None: assert static_k.size(0) == bsz * num_heads assert static_k.size(2) == head_dim k = static_k if static_v is not None: assert static_v.size(0) == bsz * num_heads assert static_v.size(2) == head_dim v = static_v src_len = k.size(1) if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if add_zero_attn: src_len += 1 k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype= k.dtype, device=k.device)], dim=1) v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype= v.dtype, device=v.device)], dim=1) if attn_mask is not None: attn_mask = pad(attn_mask, (0, 1)) if key_padding_mask is not None: key_padding_mask = pad(key_padding_mask, (0, 1)) if sep: attn_output_weights = k.transpose(1, 2) assert list(attn_output_weights.size()) == [bsz * num_heads, head_dim, src_len] if key_padding_mask is not None: attn_output_weights = attn_output_weights.view(bsz, num_heads, head_dim, src_len) attn_output_weights = attn_output_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')) attn_output_weights = attn_output_weights.view(bsz * num_heads, head_dim, src_len) attn_output_weights = F.softmax(attn_output_weights, dim=-1) attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) attn_output = torch.bmm(attn_output_weights, v) assert list(attn_output.size()) == [bsz * num_heads, head_dim, head_dim ] attn_output_weights = F.softmax(q, dim=-1) attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) attn_output = torch.bmm(attn_output_weights, attn_output) assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias) return attn_output, None else: attn_output_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] if attn_mask is not None: attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) attn_output_weights = attn_output_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')) attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) attn_output_weights = F.softmax(attn_output_weights, dim=-1) attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) attn_output = torch.bmm(attn_output_weights, v) assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = 
F.linear(attn_output, out_proj_weight, out_proj_bias) if need_weights: attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) return attn_output, attn_output_weights.sum(dim=1) / num_heads else: return attn_output, None def _get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class MultiheadSeparableAttention(nn.Module): __annotations__ = {'bias_k': torch._jit_internal.Optional[torch.Tensor], 'bias_v': torch._jit_internal.Optional[torch.Tensor]} __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight'] def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, shared_qk=False, sep=False): super(MultiheadSeparableAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim == embed_dim) self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.shared_qk = shared_qk self.sep = sep if self._qkv_same_embed_dim is False: self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim)) self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. kdim)) self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. vdim)) self.register_parameter('in_proj_weight', None) else: if shared_qk: self.in_proj_weight = nn.Parameter(torch.empty(2 * embed_dim, embed_dim)) else: self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim)) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: if shared_qk: self.in_proj_bias = nn.Parameter(torch.empty(2 * embed_dim)) else: self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = nn.Parameter(torch.empty(1, 1, embed_dim)) self.bias_v = nn.Parameter(torch.empty(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: nn.init.xavier_uniform_(self.in_proj_weight) else: nn.init.xavier_uniform_(self.q_proj_weight) nn.init.xavier_uniform_(self.k_proj_weight) nn.init.xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def __setstate__(self, state): if '_qkv_same_embed_dim' not in state: state['_qkv_same_embed_dim'] = True super(MultiheadSeparableAttention, self).__setstate__(state) def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None): if not self._qkv_same_embed_dim: return multi_head_sep_attention_forward(query, key, value, self .embed_dim, self.num_heads, self.in_proj_weight, self. 
in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_separate_proj_weight=True, q_proj_weight=self. q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight, shared_qk=self.shared_qk, sep=self.sep) else: return multi_head_sep_attention_forward(query, key, value, self .embed_dim, self.num_heads, self.in_proj_weight, self. in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, shared_qk= self.shared_qk, sep=self.sep) class ConformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', attn=False, conv=False, convsz=1, shared_qk= False, sep=False): super(ConformerEncoderLayer, self).__init__() self.conv = conv if conv: assert convsz % 2 == 1 self.conv_layer = nn.Conv1d(d_model, d_model, convsz, padding= int((convsz - 1) / 2), groups=nhead) self.attn = attn if attn: self.self_attn = MultiheadSeparableAttention(d_model, nhead, dropout=dropout, shared_qk=shared_qk, sep=sep) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(ConformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): if self.conv: src2 = src.permute(1, 2, 0) src2 = self.conv_layer(src2) src2 = src2.permute(2, 0, 1) else: src2 = src if self.attn: src2 = self.self_attn(src2, src2, src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.optim import * from torch.optim.lr_scheduler import * import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 + tmp0 tmp3 = tmp2 + tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 + tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 + tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp12 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp6 - tmp12 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp9 - tmp12 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = tmp23 / tmp11 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tl.store(out_ptr0 + x0, tmp12, xmask) tl.store(out_ptr1 + x0, tmp27, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 + tmp0 tmp3 = tmp1 - tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) 
@triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2048, 4), (4, 1)) assert_size_stride(primals_5, (2048,), (1,)) assert_size_stride(primals_6, (4, 2048), (2048, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2048), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0) del buf3 buf10 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(131072)](buf4, primals_5, buf10, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0 ), reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), 
out=buf5 ) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, buf2, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf7 = buf1 del buf1 buf8 = buf0 del buf0 triton_poi_fused_native_layer_norm_4[grid(64)](buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_5[grid(256)](buf6, buf7, buf8, primals_8, primals_9, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del buf8 del primals_9 return buf9, primals_1, primals_8, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0 ), buf6, primals_6, buf10, primals_4 def multi_head_sep_attention_forward(query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None, need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None, v_proj_weight=None, static_k=None, static_v=None, shared_qk=False, sep= False): if not torch.jit.is_scripting(): pass tgt_len, bsz, embed_dim = query.size() assert embed_dim == embed_dim_to_check assert key.size() == value.size() head_dim = embed_dim // num_heads assert head_dim * num_heads == embed_dim, 'embed_dim must be divisible by num_heads' scaling = float(head_dim) ** -0.5 if not use_separate_proj_weight: if torch.equal(query, key) and torch.equal(key, value): if shared_qk: k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk( 2, dim=-1) q = k else: q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk( 3, dim=-1) elif torch.equal(key, value): _b = in_proj_bias _start = 0 _end = embed_dim _w = in_proj_weight[_start:_end, :] if _b is not None: _b = _b[_start:_end] q = F.linear(query, _w, _b) if key is None: assert value is None k = None v = None else: _b = in_proj_bias _start = embed_dim _end = None _w = in_proj_weight[_start:, :] if _b is not None: _b = _b[_start:] k, v = F.linear(key, _w, _b).chunk(2, dim=-1) else: _b = in_proj_bias _start = 0 _end = embed_dim _w = in_proj_weight[_start:_end, :] if _b is not None: _b = _b[_start:_end] q = F.linear(query, _w, _b) _b = in_proj_bias _start = embed_dim _end = embed_dim * 2 _w = in_proj_weight[_start:_end, :] if _b is not None: _b = _b[_start:_end] k = F.linear(key, _w, _b) _b = in_proj_bias _start = embed_dim * 2 _end = None _w = in_proj_weight[_start:, :] if _b is not None: _b = _b[_start:] v = F.linear(value, _w, _b) else: q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) len1, len2 = q_proj_weight_non_opt.size() assert len1 == embed_dim and len2 == query.size(-1) k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) len1, len2 = k_proj_weight_non_opt.size() assert len1 == embed_dim and len2 == key.size(-1) v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) len1, len2 = v_proj_weight_non_opt.size() assert len1 == embed_dim and len2 == value.size(-1) if in_proj_bias is not None: q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0: embed_dim]) k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim :embed_dim * 2]) v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[ embed_dim * 2:]) else: q = F.linear(query, q_proj_weight_non_opt, in_proj_bias) k = F.linear(key, k_proj_weight_non_opt, in_proj_bias) v = F.linear(value, v_proj_weight_non_opt, in_proj_bias) q = q * scaling if attn_mask is 
not None: if attn_mask.dim() == 2: attn_mask = attn_mask.unsqueeze(0) if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: raise RuntimeError( 'The size of the 2D attn_mask is not correct.') elif attn_mask.dim() == 3: if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: raise RuntimeError( 'The size of the 3D attn_mask is not correct.') else: raise RuntimeError("attn_mask's dimension {} is not supported". format(attn_mask.dim())) if bias_k is not None and bias_v is not None: if static_k is None and static_v is None: k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = pad(attn_mask, (0, 1)) if key_padding_mask is not None: key_padding_mask = pad(key_padding_mask, (0, 1)) else: assert static_k is None, 'bias cannot be added to static key.' assert static_v is None, 'bias cannot be added to static value.' else: assert bias_k is None assert bias_v is None q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) if static_k is not None: assert static_k.size(0) == bsz * num_heads assert static_k.size(2) == head_dim k = static_k if static_v is not None: assert static_v.size(0) == bsz * num_heads assert static_v.size(2) == head_dim v = static_v src_len = k.size(1) if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if add_zero_attn: src_len += 1 k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype= k.dtype, device=k.device)], dim=1) v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype= v.dtype, device=v.device)], dim=1) if attn_mask is not None: attn_mask = pad(attn_mask, (0, 1)) if key_padding_mask is not None: key_padding_mask = pad(key_padding_mask, (0, 1)) if sep: attn_output_weights = k.transpose(1, 2) assert list(attn_output_weights.size()) == [bsz * num_heads, head_dim, src_len] if key_padding_mask is not None: attn_output_weights = attn_output_weights.view(bsz, num_heads, head_dim, src_len) attn_output_weights = attn_output_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')) attn_output_weights = attn_output_weights.view(bsz * num_heads, head_dim, src_len) attn_output_weights = F.softmax(attn_output_weights, dim=-1) attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) attn_output = torch.bmm(attn_output_weights, v) assert list(attn_output.size()) == [bsz * num_heads, head_dim, head_dim ] attn_output_weights = F.softmax(q, dim=-1) attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) attn_output = torch.bmm(attn_output_weights, attn_output) assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias) return attn_output, None else: attn_output_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] if attn_mask is not None: attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) attn_output_weights = attn_output_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')) 
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) attn_output_weights = F.softmax(attn_output_weights, dim=-1) attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) attn_output = torch.bmm(attn_output_weights, v) assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias) if need_weights: attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) return attn_output, attn_output_weights.sum(dim=1) / num_heads else: return attn_output, None def _get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class MultiheadSeparableAttention(nn.Module): __annotations__ = {'bias_k': torch._jit_internal.Optional[torch.Tensor], 'bias_v': torch._jit_internal.Optional[torch.Tensor]} __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight'] def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, shared_qk=False, sep=False): super(MultiheadSeparableAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim == embed_dim) self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.shared_qk = shared_qk self.sep = sep if self._qkv_same_embed_dim is False: self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim)) self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. kdim)) self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. 
vdim)) self.register_parameter('in_proj_weight', None) else: if shared_qk: self.in_proj_weight = nn.Parameter(torch.empty(2 * embed_dim, embed_dim)) else: self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim)) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: if shared_qk: self.in_proj_bias = nn.Parameter(torch.empty(2 * embed_dim)) else: self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = nn.Parameter(torch.empty(1, 1, embed_dim)) self.bias_v = nn.Parameter(torch.empty(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: nn.init.xavier_uniform_(self.in_proj_weight) else: nn.init.xavier_uniform_(self.q_proj_weight) nn.init.xavier_uniform_(self.k_proj_weight) nn.init.xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def __setstate__(self, state): if '_qkv_same_embed_dim' not in state: state['_qkv_same_embed_dim'] = True super(MultiheadSeparableAttention, self).__setstate__(state) def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None): if not self._qkv_same_embed_dim: return multi_head_sep_attention_forward(query, key, value, self .embed_dim, self.num_heads, self.in_proj_weight, self. in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_separate_proj_weight=True, q_proj_weight=self. q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight, shared_qk=self.shared_qk, sep=self.sep) else: return multi_head_sep_attention_forward(query, key, value, self .embed_dim, self.num_heads, self.in_proj_weight, self. 
in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, shared_qk= self.shared_qk, sep=self.sep) class ConformerEncoderLayerNew(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', attn=False, conv=False, convsz=1, shared_qk= False, sep=False): super(ConformerEncoderLayerNew, self).__init__() self.conv = conv if conv: assert convsz % 2 == 1 self.conv_layer = nn.Conv1d(d_model, d_model, convsz, padding= int((convsz - 1) / 2), groups=nhead) self.attn = attn if attn: self.self_attn = MultiheadSeparableAttention(d_model, nhead, dropout=dropout, shared_qk=shared_qk, sep=sep) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(ConformerEncoderLayerNew, self).__setstate__(state) def forward(self, input_0): primals_4 = self.linear1.weight primals_5 = self.linear1.bias primals_6 = self.linear2.weight primals_2 = self.linear2.bias primals_3 = self.norm1.weight primals_7 = self.norm1.bias primals_8 = self.norm2.weight primals_9 = self.norm2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
PranjaliJain/matchmaker
ConformerEncoderLayer
false
14,266
[ "Apache-2.0" ]
97
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
https://github.com/PranjaliJain/matchmaker/tree/b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
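Note that the trace above was captured with the defaults attn=False, conv=False, so ConformerEncoderLayerNew only covers the residual LayerNorm/feed-forward path: the doubling tmp1 = tmp0 + tmp0 in the first kernel is src + self.dropout1(src) with dropout a no-op in eval mode. A sketch of an eval-mode check under those assumptions (CUDA required by call()):

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    ref = ConformerEncoderLayer(d_model=4, nhead=4).cuda().eval()
    opt = ConformerEncoderLayerNew(d_model=4, nhead=4).cuda().eval()
    opt.load_state_dict(ref.state_dict())
    src = torch.rand([4, 4, 4, 4], device='cuda')
    with torch.no_grad():
        assert torch.allclose(ref(src), opt(src), atol=1e-5)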
RepulsionLoss
import torch
import torch.nn as nn


def get_knn_idx_dist(pos: 'torch.FloatTensor', query: 'torch.FloatTensor',
        k, offset=0):
    """
    :param pos:   (B, N, F)
    :param query: (B, M, F)
    :return knn_idx: (B, M, k)
    """
    B, N, F = tuple(pos.size())
    M = query.size(1)
    pos = pos.unsqueeze(1).expand(B, M, N, F)
    query = query.unsqueeze(2).expand(B, M, N, F)
    dist = torch.sum((pos - query) ** 2, dim=3, keepdim=False)
    knn_idx = torch.argsort(dist, dim=2)[:, :, offset:k + offset]
    knn_dist = torch.gather(dist, dim=2, index=knn_idx)
    return knn_idx, knn_dist


class RepulsionLoss(nn.Module):

    def __init__(self, knn=4, h=0.03):
        super().__init__()
        self.knn = knn
        self.h = h

    def forward(self, pc):
        _knn_idx, knn_dist = get_knn_idx_dist(pc, pc, k=self.knn, offset=1)
        weight = torch.exp(-knn_dist / self.h ** 2)
        loss = torch.sum(-knn_dist * weight)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_pow_sort_sub_sum_0(in_ptr0, out_ptr0, out_ptr1,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x1 = xindex // 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * r2 + 16 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp1 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * r2 + 16 * x1), xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * r2 + 16 * x1), xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy=
        'evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 4 * r2 + 16 * x1), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = r2
    tmp20 = tmp19.to(tl.int16)
    tmp21 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
    tmp22 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
    _tmp23, tmp24 = triton_helpers.sort_with_index(tmp21, tmp22, None, 1,
        stable=False, descending=False)
    tl.store(out_ptr0 + (r2 + 4 * x3), tmp18, xmask)
    tl.store(out_ptr1 + (r2 + 4 * x3), tmp24, xmask)


@triton.jit
def triton_per_fused_div_exp_gather_mul_neg_sum_1(in_ptr0, in_ptr1,
        out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    rnumel = 48
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r0 = rindex % 3
    r1 = rindex // 3
    tmp0 = tl.load(in_ptr0 + (1 + r0 + 4 * r1), rmask, other=0.0)
    tmp1 = tmp0.to(tl.int64)
    tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
    tmp3 = tmp1 + tmp2
    tmp4 = tmp1 < 0
    tmp5 = tl.where(tmp4, tmp3, tmp1)
    tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~rmask,
        'index out of bounds: 0 <= tmp5 < 4')
    tmp7 = tl.load(in_ptr1 + (tmp5 + 4 * r1), rmask, eviction_policy=
        'evict_last', other=0.0)
    tmp8 = -tmp7
    tmp9 = 1111.111111111111
    tmp10 = tmp8 * tmp9
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp8 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(rmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int16)
        get_raw_stream(0)
        triton_per_fused_pow_sort_sub_sum_0[grid(16)](arg0_1, buf0, buf2,
            16, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_div_exp_gather_mul_neg_sum_1[grid(1)](buf2, buf0,
            buf3, 1, 48, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf2
    return buf3,


def get_knn_idx_dist(pos: 'torch.FloatTensor', query: 'torch.FloatTensor',
        k, offset=0):
    """
    :param pos:   (B, N, F)
    :param query: (B, M, F)
    :return knn_idx: (B, M, k)
    """
    B, N, F = tuple(pos.size())
    M = query.size(1)
    pos = pos.unsqueeze(1).expand(B, M, N, F)
    query = query.unsqueeze(2).expand(B, M, N, F)
    dist = torch.sum((pos - query) ** 2, dim=3, keepdim=False)
    knn_idx = torch.argsort(dist, dim=2)[:, :, offset:k + offset]
    knn_dist = torch.gather(dist, dim=2, index=knn_idx)
    return knn_idx, knn_dist


class RepulsionLossNew(nn.Module):

    def __init__(self, knn=4, h=0.03):
        super().__init__()
        self.knn = knn
        self.h = h

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
RRemixx/DMRDenoise
RepulsionLoss
false
14,267
[ "MIT" ]
79
026d25f9eaf98fdfd85a67caeb9b49cab71148e9
https://github.com/RRemixx/DMRDenoise/tree/026d25f9eaf98fdfd85a67caeb9b49cab71148e9
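The constant 1111.111111111111 in the second kernel is simply 1 / h**2 for the default h=0.03, so the fused graph evaluates sum(-d * exp(-d / h**2)) over the 3 neighbor columns that survive the offset=1 slice when there are only 4 points (hence rnumel = 4 * 4 * 3 = 48). A hedged equivalence sketch, assuming the two classes above are in scope and a CUDA device is available:

import torch

if torch.cuda.is_available():
    assert abs(1 / 0.03 ** 2 - 1111.111111111111) < 1e-6
    torch.manual_seed(0)
    pc = torch.rand([4, 4, 4], device='cuda')
    assert torch.allclose(RepulsionLoss()(pc), RepulsionLossNew()(pc), atol=1e-4)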
BasicBlock
import torch
import torch.nn as nn
import torch.utils.data.distributed


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, groups=groups, bias=False, dilation=dilation,
        padding_mode='reflect')


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
            base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError(
                'Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes, track_running_stats=False, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes, track_running_stats=False, affine=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x5, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_4( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, xmask) tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tmp18 = 0.0 tmp19 = tmp17 <= tmp18 tl.store(out_ptr0 + x3, tmp17, xmask) tl.store(out_ptr1 + x3, tmp19, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (1, 4, 1, 1), (4, 1, 1, 1), 0) del buf3 triton_per_fused__native_batch_norm_legit_1[grid(4)](buf5, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused__native_batch_norm_legit_reflection_pad2d_relu_2[grid (576)](buf1, buf2, buf5, primals_3, primals_4, buf6, 576, XBLOCK=256, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf9 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf11 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) triton_per_fused__native_batch_norm_legit_3[grid(4)](buf7, buf8, buf9, buf11, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_4[ grid(256)](buf7, buf8, buf9, primals_6, primals_7, primals_1, buf12, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 del primals_1 del primals_7 return (buf12, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf1, buf2, buf5, buf6, buf7, reinterpret_tensor(buf11, (4,), (1,), 0), buf13, reinterpret_tensor(buf8, (1, 4, 1, 1), (4, 1, 1, 1 ), 0)) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation, padding_mode='reflect') class BasicBlockNew(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_layer=None): super(BasicBlockNew, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes, track_running_stats=False, affine=True) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes, track_running_stats=False, affine=True) self.downsample = downsample self.stride = stride def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.bn1.weight primals_4 = self.bn1.bias primals_5 = self.conv2.weight primals_6 = self.bn2.weight primals_7 = self.bn2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
QiuhongAnnaWei/IBRNet
BasicBlock
false
14268
[ "Apache-2.0" ]
254
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
https://github.com/QiuhongAnnaWei/IBRNet/tree/6c8b68e6d95eae04535ff0906387ec7899f5d5ce
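A quick smoke test for the record above (my addition, not part of the dataset): it assumes a CUDA device and that the BasicBlockNew class and its call helper from the optimised code are in scope. The residual norm/ReLU block is shape-preserving, matching the (4, 4, 4, 4) input the compiled call asserts.

import torch

if torch.cuda.is_available():
    block = BasicBlockNew(inplanes=4, planes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')  # shape asserted by the compiled call
    y = block(x)
    assert y.shape == x.shape  # residual block preserves the input shape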
BasicBlock
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils import weight_norm def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, weightnorm=None, shortcut=True): super(BasicBlock, self).__init__() self.shortcut = shortcut self.conv1 = conv3x3(inplanes, planes, stride) self.relu1 = nn.PReLU(num_parameters=planes, init=0.1) self.relu2 = nn.PReLU(num_parameters=planes, init=0.1) self.conv2 = conv3x3(inplanes, planes, stride) if weightnorm: self.conv1 = weight_norm(self.conv1) self.conv2 = weight_norm(self.conv2) def forward(self, x): out = self.relu1(x) out = F.pad(out, (1, 1, 1, 1), 'reflect') out = self.conv1(out) out = out[:, :, :x.shape[2], :x.shape[3]] out = self.relu2(out) out = F.pad(out, (1, 1, 1, 1), 'reflect') out = self.conv2(out) out = out[:, :, :x.shape[2], :x.shape[3]] if self.shortcut: out = x + out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn.utils import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp4 = tmp3 * tmp0 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x5, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_reflection_pad2d_0[grid(576)](primals_2, primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused__prelu_kernel_reflection_pad2d_0[grid(576)](buf2, primals_5, buf3, 576, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_2[grid(256)](buf5, primals_2, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, primals_2, primals_3, primals_5, primals_6, buf0, buf2, buf3 def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True) class BasicBlockNew(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, weightnorm=None, shortcut=True): super(BasicBlockNew, self).__init__() self.shortcut = shortcut self.conv1 = conv3x3(inplanes, planes, stride) self.relu1 = nn.PReLU(num_parameters=planes, init=0.1) self.relu2 = nn.PReLU(num_parameters=planes, init=0.1) self.conv2 = conv3x3(inplanes, planes, stride) if weightnorm: self.conv1 = weight_norm(self.conv1) self.conv2 = weight_norm(self.conv2) def forward(self, input_0): primals_3 = self.conv1.weight primals_1 = self.conv1.bias primals_4 = self.relu1.weight primals_5 = self.relu2.weight primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
RaoUmer/SRResCycGAN
BasicBlock
false
14269
[ "MIT" ]
50
b0999180a1906f519915ba2034fe492aef162109
https://github.com/RaoUmer/SRResCycGAN/tree/b0999180a1906f519915ba2034fe492aef162109
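A hedged equivalence harness for the pair above (my own sketch, not part of the dataset): it assumes CUDA and that both the eager BasicBlock and the Triton-backed BasicBlockNew are in scope; the tolerances are my choice for float32.

import torch

eager = BasicBlock(inplanes=4, planes=4).cuda()
fused = BasicBlockNew(inplanes=4, planes=4).cuda()
fused.load_state_dict(eager.state_dict())  # share the conv and PReLU parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # both paths include the shortcut add, so outputs should agree closely
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)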
FunctionalConv3d
import torch class FunctionalConv3d(torch.nn.Module): def __init__(self, *args, **kwargs): super().__init__() self.conv = torch.nn.Conv3d(*args, **kwargs) def forward(self, x): x = torch.nn.functional.conv3d(x, self.conv.weight, self.conv.bias, self.conv.stride, self.conv.padding, self.conv.dilation, self. conv.groups) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (1, 4, 1, 1, 1), (4, 1, 4, 4, 4), 0) del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(4)](buf1, primals_2, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class FunctionalConv3dNew(torch.nn.Module): def __init__(self, *args, **kwargs): super().__init__() self.conv = torch.nn.Conv3d(*args, **kwargs) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
PogChamper/torch2trt
FunctionalConv3d
false
14270
[ "MIT" ]
3363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
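A minimal usage sketch (mine; assumes CUDA and FunctionalConv3dNew in scope): the compiled call above views the 4-D sample input as a single 5-D batch before conv3d, so a full-size kernel collapses the spatial dims.

import torch

m = FunctionalConv3dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # the dataset's get_inputs() shape
y = m(x)
print(y.shape)  # torch.Size([4, 1, 1, 1]), per the final reinterpret_tensor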
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 del primals_7 return buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Rajathbharadwaj/algorithmic-efficiency
PositionwiseFeedForward
false
14271
[ "Apache-2.0" ]
49
47d2928836e0574bc54cc3ad58860dd4daf86cce
https://github.com/Rajathbharadwaj/algorithmic-efficiency/tree/47d2928836e0574bc54cc3ad58860dd4daf86cce
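An equivalence sketch for this record (my addition, not part of the dataset): dropout is the only source of nondeterminism in the eager module, so both copies run in eval() mode; assumes CUDA and that both classes above are in scope.

import torch

ref = PositionwiseFeedForward(d_in=4, d_hid=4).cuda().eval()
opt = PositionwiseFeedForwardNew(d_in=4, d_hid=4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # identical parameter names in both classes
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-4)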
ConvBlock
import torch import torch.nn as nn class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out class ConvBlock(nn.Module): """Layer to perform a convolution followed by ELU """ def __init__(self, in_channels, out_channels): super(ConvBlock, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.ELU(inplace=True) def forward(self, x): out = self.conv(x) out = self.nonlin(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out class ConvBlockNew(nn.Module): """Layer to perform a convolution followed by ELU """ def __init__(self, in_channels, out_channels): super(ConvBlockNew, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.ELU(inplace=True) def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RaresAmbrus/KP3D
ConvBlock
false
14272
[ "MIT" ]
227
7966c66679d32b81ea6e3181847ab3146e5a3ed2
https://github.com/RaresAmbrus/KP3D/tree/7966c66679d32b81ea6e3181847ab3146e5a3ed2
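A short smoke test (mine; assumes CUDA and ConvBlockNew in scope): reflection padding of 1 plus a 3x3 convolution keeps the spatial size, so the fused pad/conv/ELU block is shape-preserving.

import torch

blk = ConvBlockNew(in_channels=4, out_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
print(blk(x).shape)  # torch.Size([4, 4, 4, 4])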
TensorSigmoid
import torch class TensorSigmoid(torch.nn.Module): def __init__(self): super(TensorSigmoid, self).__init__() def forward(self, x): return x.sigmoid() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorSigmoidNew(torch.nn.Module): def __init__(self): super(TensorSigmoidNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
TensorSigmoid
false
14273
[ "MIT" ]
3363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
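A direct check of the single fused kernel above against the eager op (my harness, not part of the dataset; assumes CUDA; the float32 tolerances are my choice):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = TensorSigmoidNew()(x)  # runs triton_poi_fused_sigmoid_0 under the hood
torch.testing.assert_close(y, torch.sigmoid(x), rtol=1e-5, atol=1e-5)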
MultiHeadAttention
import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -1000000000.0) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) q += residual q = self.layer_norm(q) return q, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def 
triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 16), (16, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_1, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_1, buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_9 return buf14, buf7, primals_1, primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), buf11, primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -1000000000.0) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttentionNew(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, input_0, input_1, input_2): primals_4 = self.w_qs.weight primals_5 = 
self.w_ks.weight primals_6 = self.w_vs.weight primals_7 = self.fc.weight primals_8 = self.layer_norm.weight primals_9 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
Rajathbharadwaj/algorithmic-efficiency
MultiHeadAttention
false
14274
[ "Apache-2.0" ]
49
47d2928836e0574bc54cc3ad58860dd4daf86cce
https://github.com/Rajathbharadwaj/algorithmic-efficiency/tree/47d2928836e0574bc54cc3ad58860dd4daf86cce
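An equivalence sketch for the attention record (my addition, not part of the dataset): the compiled module returns both the attended output and the attention map, mirroring the eager (q, attn) pair, and eval() keeps the two dropout layers inert. Assumes CUDA and both classes in scope; tolerances are mine.

import torch

ref = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4).cuda().eval()
opt = MultiHeadAttentionNew(n_head=4, d_model=4, d_k=4, d_v=4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # w_qs/w_ks/w_vs/fc/layer_norm line up
q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
with torch.no_grad():
    out_ref, attn_ref = ref(q, k, v)
    out_opt, attn_opt = opt(q, k, v)
torch.testing.assert_close(out_opt, out_ref, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(attn_opt, attn_ref, rtol=1e-4, atol=1e-4)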
decoderDepth
import torch import torch.nn as nn import torch.nn.functional as F class decoderDepth(nn.Module): def __init__(self): super(decoderDepth, self).__init__() self.dconv0 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True) self.dgn0 = nn.GroupNorm(32, 512) self.dconv1 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True) self.dgn1 = nn.GroupNorm(16, 256) self.dconv2 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True) self.dgn2 = nn.GroupNorm(16, 256) self.dconv3 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True) self.dgn3 = nn.GroupNorm(8, 128) self.dconv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True) self.dgn4 = nn.GroupNorm(8, 128) self.dconv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True) self.dgn5 = nn.GroupNorm(4, 64) self.convFinal = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=True) def forward(self, x): x1 = F.relu(self.dgn0(self.dconv0(x)), True) x2 = F.relu(self.dgn1(self.dconv1(x1)), True) x3 = F.relu(self.dgn2(self.dconv2(F.interpolate(x2, scale_factor=2, mode='bilinear'))), True) x4 = F.relu(self.dgn3(self.dconv3(x3)), True) x5 = F.relu(self.dgn4(self.dconv4(F.interpolate(x4, scale_factor=2, mode='bilinear'))), True) x6 = F.relu(self.dgn5(self.dconv5(x5)), True) depthDelta = 0.5 * torch.tanh(self.convFinal(F.interpolate(x6, scale_factor=2, mode='bilinear'))) return depthDelta def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x0 = xindex % 128 tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r5 = rindex r3 = rindex // 4096 tmp0 = tl.load(in_out_ptr0 + (r5 + 16384 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 4 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r5 + 16384 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr2 + x4, tmp6, xmask) @triton.jit def triton_per_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 128 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 65536.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], 
True, tl.int1) x3 = xindex x4 = xindex // 4096 x1 = xindex // 4096 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 65536.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_convolution_native_group_norm_3(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x0 = xindex % 128 tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r5 = rindex r3 = rindex // 4096 tmp0 = tl.load(in_out_ptr0 + (r5 + 8192 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 2 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r5 + 8192 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr2 + x4, tmp6, xmask) @triton.jit def triton_per_fused_native_group_norm_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 65536.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = xindex // 4096 x1 = xindex // 4096 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 63, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 128 % 128 x0 = xindex % 128 x2 = xindex // 16384 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 64, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x2), None, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 64 * tmp4 + 4096 * x2), None, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 
< 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 64 * tmp22 + 4096 * x2), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (tmp13 + 64 * tmp22 + 4096 * x2), None, eviction_policy='evict_last') tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp16 tmp27 = tmp23 + tmp26 tmp28 = tmp27 - tmp18 tmp30 = tmp28 * tmp29 tmp31 = tmp18 + tmp30 tl.store(in_out_ptr0 + x4, tmp31, None) @triton.jit def triton_red_fused_convolution_native_group_norm_10(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x0 = xindex % 128 tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r5 = rindex r3 = rindex // 16384 tmp0 = tl.load(in_out_ptr0 + (r5 + 32768 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 2 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r5 + 32768 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr2 + x4, tmp6, xmask) @triton.jit def triton_per_fused_native_group_norm_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 262144.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = xindex // 16384 x1 = xindex // 16384 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4 // 16, None, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 262144.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_convolution_native_group_norm_13(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + 16384 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + 16384 * x3), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr2 + x3, tmp6, xmask) @triton.jit def triton_per_fused_native_group_norm_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 262144.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = xindex // 16384 x1 = xindex // 16384 % 128 tmp0 = tl.load(in_ptr0 + x3, 
    None)
    tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tl.store(out_ptr0 + x3, tmp10, None)


@triton.jit
def triton_poi_fused__to_copy_16(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_clamp_17(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 127, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + x0, tmp12, xmask)


@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_18(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + x0, tmp13, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_19(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 256 % 256
    x0 = xindex % 256
    x2 = xindex // 65536
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 128, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 128 * tmp4 + 16384 * x2), None,
        eviction_policy='evict_last')
    tmp11 = tmp10 + tmp1
    tmp12 = tmp10 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp10)
    tmp14 = tl.load(in_ptr2 + (tmp13 + 128 * tmp4 + 16384 * x2), None,
        eviction_policy='evict_last')
    tmp15 = tmp14 - tmp9
    tmp17 = tmp15 * tmp16
    tmp18 = tmp9 + tmp17
    tmp20 = tmp19 + tmp1
    tmp21 = tmp19 < 0
    tmp22 = tl.where(tmp21, tmp20, tmp19)
    tmp23 = tl.load(in_ptr2 + (tmp8 + 128 * tmp22 + 16384 * x2), None,
        eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr2 + (tmp13 + 128 * tmp22 + 16384 * x2), None,
        eviction_policy='evict_last')
    tmp25 = tmp24 - tmp23
    tmp26 = tmp25 * tmp16
    tmp27 = tmp23 + tmp26
    tmp28 = tmp27 - tmp18
    tmp30 = tmp28 * tmp29
    tmp31 = tmp18 + tmp30
    tl.store(in_out_ptr0 + x4, tmp31, None)


@triton.jit
def triton_poi_fused_convolution_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 65536 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


@triton.jit
def triton_red_fused_native_group_norm_21(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 448
    rnumel = 74899
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 14
    x1 = xindex // 14
    tmp13_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp13_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp13_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x3 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = r2 + 74899 * x0
        tmp1 = tl.full([1, 1], 1048576, tl.int32)
        tmp2 = tmp0 < tmp1
        tmp3 = tl.load(in_ptr0 + (1048576 * x1 + (r2 + 74899 * x0) %
            1048576), rmask & tmp2 & xmask, eviction_policy='evict_last',
            other=0.0)
        tmp4 = 0.0
        tmp5 = tl.full(tmp4.shape, 0, tmp4.dtype)
        tmp6 = tl.where(tmp2, tmp4, tmp5)
        tmp7 = 1.0
        tmp8 = tl.full(tmp7.shape, 0, tmp7.dtype)
        tmp9 = tl.where(tmp2, tmp7, tmp8)
        tmp10 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
        tmp11 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
        tmp12 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
        tmp13_mean_next, tmp13_m2_next, tmp13_weight_next = (triton_helpers
            .welford_combine(tmp13_mean, tmp13_m2, tmp13_weight, tmp10,
            tmp11, tmp12))
        tmp13_mean = tl.where(rmask & xmask, tmp13_mean_next, tmp13_mean)
        tmp13_m2 = tl.where(rmask & xmask, tmp13_m2_next, tmp13_m2)
        tmp13_weight = tl.where(rmask & xmask, tmp13_weight_next, tmp13_weight)
    tmp13_tmp, tmp14_tmp, tmp15_tmp = triton_helpers.welford(tmp13_mean,
        tmp13_m2, tmp13_weight, 1)
    tmp13 = tmp13_tmp[:, None]
    tmp14 = tmp14_tmp[:, None]
    tmp15 = tmp15_tmp[:, None]
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)
    tl.store(out_ptr2 + x3, tmp15, xmask)


@triton.jit
def triton_per_fused_native_group_norm_22(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    rnumel = 14
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 14 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 14 * x0), rmask & xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 14 * x0), rmask & xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(rmask & xmask, tmp3, 0)
    tmp8 = tl.where(rmask & xmask, tmp4, 0)
    tmp9 = tl.where(rmask & xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 1048576.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
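    # (added note) Welford finalize: tmp14 holds the per-group m2, so
    # tmp14 / 1048576.0 is the biased variance of the 16 * 256 * 256
    # elements in each group, and tmp20 = rsqrt(var + 1e-05) is the
    # reciprocal standard deviation stored next to the mean below.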
    tl.store(out_ptr2 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)


@triton.jit
def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x4 = xindex // 65536
    x1 = xindex // 65536 % 128
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 1048576.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


@triton.jit
def triton_red_fused_convolution_native_group_norm_24(in_out_ptr0, in_ptr0,
    out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr,
    RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x4 = xindex
    x1 = xindex // 2 % 64
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r3 = rindex
        tmp0 = tl.load(in_out_ptr0 + (r3 + 32768 * x4), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp2 = tmp0 + tmp1
        tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
        tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
            welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
            )
        tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
        tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
        tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
        tl.store(in_out_ptr0 + (r3 + 32768 * x4), tmp2, rmask & xmask)
    tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
        tmp4_m2, tmp4_weight, 1)
    tmp4 = tmp4_tmp[:, None]
    tmp5 = tmp5_tmp[:, None]
    tmp6 = tmp6_tmp[:, None]
    tl.store(out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr1 + x4, tmp5, xmask)
    tl.store(out_ptr2 + x4, tmp6, xmask)


@triton.jit
def triton_per_fused_native_group_norm_25(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 32 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r1 + 32 * x0), xmask, other=0.0)
    tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp3, 0)
    tmp8 = tl.where(xmask, tmp4, 0)
    tmp9 = tl.where(xmask, tmp5, 0)
    tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
    tmp13 = tmp10[:, None]
    tmp14 = tmp11[:, None]
    tmp12[:, None]
    tmp16 = 1048576.0
    tmp17 = tmp14 / tmp16
    tmp18 = 1e-05
    tmp19 = tmp17 + tmp18
    tmp20 = libdevice.rsqrt(tmp19)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr0 + x0, tmp13, xmask)


@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x4 = xindex // 65536
    x1 = xindex // 65536 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tl.store(out_ptr0 + x3, tmp10, None)


@triton.jit
def triton_poi_fused__to_copy_27(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_clamp_28(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 255, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + x0, tmp12, xmask)
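

# Note added for clarity (not part of the generated kernels): the paired
# _to_copy / add_clamp / ..._clamp_mul_sub kernels above and below build the
# lookup tables for 2x bilinear upsampling with align_corners=False. For a
# destination index d the source coordinate is s = max((d + 0.5) * 0.5 - 0.5,
# 0); a rough torch equivalent of kernels 27/28/29 (illustrative only):
#
#     d = torch.arange(512, dtype=torch.float32)
#     s = ((d + 0.5) * 0.5 - 0.5).clamp(min=0.0)
#     lo = s.to(torch.int64)              # triton_poi_fused__to_copy_27
#     hi = (lo + 1).clamp(max=255)        # triton_poi_fused_add_clamp_28
#     w = (s - lo).clamp(0.0, 1.0)        # ..._clamp_mul_sub_29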


@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_29(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + x0, tmp13, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_30(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 512 % 512
    x0 = xindex % 512
    x2 = xindex // 262144
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 256, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 256 * tmp4 + 65536 * x2), None,
        eviction_policy='evict_last')
    tmp11 = tmp10 + tmp1
    tmp12 = tmp10 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp10)
    tmp14 = tl.load(in_ptr2 + (tmp13 + 256 * tmp4 + 65536 * x2), None,
        eviction_policy='evict_last')
    tmp15 = tmp14 - tmp9
    tmp17 = tmp15 * tmp16
    tmp18 = tmp9 + tmp17
    tmp20 = tmp19 + tmp1
    tmp21 = tmp19 < 0
    tmp22 = tl.where(tmp21, tmp20, tmp19)
    tmp23 = tl.load(in_ptr2 + (tmp8 + 256 * tmp22 + 65536 * x2), None,
        eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr2 + (tmp13 + 256 * tmp22 + 65536 * x2), None,
        eviction_policy='evict_last')
    tmp25 = tmp24 - tmp23
    tmp26 = tmp25 * tmp16
    tmp27 = tmp23 + tmp26
    tmp28 = tmp27 - tmp18
    tmp30 = tmp28 * tmp29
    tmp31 = tmp18 + tmp30
    tl.store(in_out_ptr0 + x4, tmp31, None)


@triton.jit
def triton_poi_fused_convolution_mul_tanh_31(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = libdevice.tanh(tmp3)
    tmp5 = 0.5
    tmp6 = tmp4 * tmp5
    tl.store(in_out_ptr0 + x0, tmp3, None)
    tl.store(out_ptr0 + x0, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27) = args
    args.clear()
    assert_size_stride(primals_1, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_2, (512,), (1,))
    assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    assert_size_stride(primals_4, (512,), (1,))
    assert_size_stride(primals_5, (512,), (1,))
    assert_size_stride(primals_6, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (256,), (1,))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256,), (1,))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_15, (128,), (1,))
    assert_size_stride(primals_16, (128,), (1,))
    assert_size_stride(primals_17, (128,), (1,))
    assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_19, (128,), (1,))
    assert_size_stride(primals_20, (128,), (1,))
    assert_size_stride(primals_21, (128,), (1,))
    assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_23, (64,), (1,))
    assert_size_stride(primals_24, (64,), (1,))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (1, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_27, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 512, 64, 64), (2097152, 4096, 64, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 32, 1, 1, 4), (128, 4, 512, 512, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 32, 1, 1, 4), (128, 4, 512, 512, 1),
            torch.float32)
        buf4 = empty_strided_cuda((4, 32, 1, 1, 4), (128, 4, 512, 512, 1),
            torch.float32)
        get_raw_stream(0)
        triton_red_fused_convolution_native_group_norm_0[grid(512)](buf1,
            primals_2, buf2, buf3, buf4, 512, 16384, XBLOCK=1, RBLOCK=1024,
            num_warps=16, num_stages=1)
        del primals_2
        buf5 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
            float32)
        buf6 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
            float32)
        buf8 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
            float32)
        triton_per_fused_native_group_norm_1[grid(128)](buf2, buf3, buf4,
            buf5, buf6, buf8, 128, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf9 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_native_group_norm_relu_2[grid(8388608)](buf1,
            buf5, buf6, primals_4, primals_5, buf9, 8388608, XBLOCK=1024,
            num_warps=4, num_stages=1)
        del primals_5
        buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 256, 64, 64), (1048576, 4096, 64, 1))
        buf11 = buf10
        del buf10
        buf12 = reinterpret_tensor(buf4, (4, 16, 1, 1, 8), (128, 8, 512, 
            512, 1), 0)
        del buf4
        buf13 = reinterpret_tensor(buf3, (4, 16, 1, 1, 8), (128, 8, 512, 
            512, 1), 0)
        del buf3
        buf14 = reinterpret_tensor(buf2, (4, 16, 1, 1, 8), (128, 8, 512, 
            512, 1), 0)
        del buf2
        triton_red_fused_convolution_native_group_norm_3[grid(512)](buf11,
            primals_7, buf12, buf13, buf14, 512, 8192, XBLOCK=1, RBLOCK=
            1024, num_warps=16, num_stages=1)
        del primals_7
        buf15 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf16 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.
            float32)
        buf18 = reinterpret_tensor(buf16, (4, 16, 1, 1), (16, 1, 1, 1), 0)
        del buf16
        triton_per_fused_native_group_norm_4[grid(64)](buf18, buf12, buf13,
            buf14, buf15, 64, 8, XBLOCK=1, num_warps=2, num_stages=1)
        buf19 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_native_group_norm_relu_5[grid(4194304)](buf11,
            buf15, buf18, primals_8, primals_9, buf19, 4194304, XBLOCK=512,
            num_warps=8, num_stages=1)
        buf20 = empty_strided_cuda((128, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_6[grid(128)](buf20, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf21 = empty_strided_cuda((128, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_7[grid(128)](buf21, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf22 = empty_strided_cuda((128,), (1,), torch.int64)
        triton_poi_fused__to_copy_6[grid(128)](buf22, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf23 = empty_strided_cuda((128,), (1,), torch.int64)
        triton_poi_fused_add_clamp_7[grid(128)](buf23, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf24 = reinterpret_tensor(buf6, (128,), (1,), 0)
        del buf6
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(128)](buf24,
            128, XBLOCK=128, num_warps=4, num_stages=1)
        buf26 = empty_strided_cuda((128, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(128)](buf26,
            128, XBLOCK=128, num_warps=4, num_stages=1)
        buf25 = empty_strided_cuda((4, 256, 128, 128), (4194304, 16384, 
            128, 1), torch.float32)
        buf27 = buf25
        del buf25
        triton_poi_fused__unsafe_index_add_mul_sub_9[grid(16777216)](buf27,
            buf20, buf22, buf19, buf23, buf24, buf21, buf26, 16777216,
            XBLOCK=1024, num_warps=4, num_stages=1)
        del buf19
        buf28 = extern_kernels.convolution(buf27, primals_10, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 256, 128, 128), (4194304, 16384, 128, 1))
        buf29 = buf28
        del buf28
        buf30 = buf14
        del buf14
        buf31 = buf13
        del buf13
        buf32 = buf12
        del buf12
        triton_red_fused_convolution_native_group_norm_10[grid(512)](buf29,
            primals_11, buf30, buf31, buf32, 512, 32768, XBLOCK=1, RBLOCK=
            2048, num_warps=16, num_stages=1)
        del primals_11
        buf33 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.
            float32)
        buf34 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.
            float32)
        buf36 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.
            float32)
        triton_per_fused_native_group_norm_11[grid(64)](buf30, buf31,
            buf32, buf33, buf34, buf36, 64, 8, XBLOCK=8, num_warps=2,
            num_stages=1)
        buf37 = empty_strided_cuda((4, 256, 128, 128), (4194304, 16384, 
            128, 1), torch.float32)
        triton_poi_fused_native_group_norm_relu_12[grid(16777216)](buf29,
            buf33, buf34, primals_12, primals_13, buf37, 16777216, XBLOCK=
            1024, num_warps=4, num_stages=1)
        del buf34
        del primals_13
        buf38 = extern_kernels.convolution(buf37, primals_14, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 128, 128, 128), (2097152, 16384, 128, 1))
        buf39 = buf38
        del buf38
        buf40 = reinterpret_tensor(buf32, (4, 8, 1, 1, 16), (128, 16, 512,
            512, 1), 0)
        del buf32
        buf41 = reinterpret_tensor(buf31, (4, 8, 1, 1, 16), (128, 16, 512,
            512, 1), 0)
        del buf31
        buf42 = reinterpret_tensor(buf30, (4, 8, 1, 1, 16), (128, 16, 512,
            512, 1), 0)
        del buf30
        triton_red_fused_convolution_native_group_norm_13[grid(512)](buf39,
            primals_15, buf40, buf41, buf42, 512, 16384, XBLOCK=1, RBLOCK=
            2048, num_warps=16, num_stages=1)
        del primals_15
        buf43 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
        buf44 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        buf46 = reinterpret_tensor(buf44, (4, 8, 1, 1), (8, 1, 1, 1), 0)
        del buf44
        triton_per_fused_native_group_norm_14[grid(32)](buf46, buf40,
            buf41, buf42, buf43, 32, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf47 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 
            128, 1), torch.float32)
        triton_poi_fused_native_group_norm_relu_15[grid(8388608)](buf39,
            buf43, buf46, primals_16, primals_17, buf47, 8388608, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf48 = empty_strided_cuda((256, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_16[grid(256)](buf48, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf49 = empty_strided_cuda((256, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_17[grid(256)](buf49, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf50 = empty_strided_cuda((256,), (1,), torch.int64)
        triton_poi_fused__to_copy_16[grid(256)](buf50, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf51 = empty_strided_cuda((256,), (1,), torch.int64)
        triton_poi_fused_add_clamp_17[grid(256)](buf51, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf52 = empty_strided_cuda((256,), (1,), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_18[grid(256)](buf52,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        buf54 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_18[grid(256)](buf54,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        buf53 = empty_strided_cuda((4, 128, 256, 256), (8388608, 65536, 
            256, 1), torch.float32)
        buf55 = buf53
        del buf53
        triton_poi_fused__unsafe_index_add_mul_sub_19[grid(33554432)](buf55,
            buf48, buf50, buf47, buf51, buf52, buf49, buf54, 33554432,
            XBLOCK=1024, num_warps=4, num_stages=1)
        del buf47
        buf56 = extern_kernels.convolution(buf55, primals_18, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf56, (4, 128, 256, 256), (8388608, 65536, 256, 1))
        buf57 = buf56
        del buf56
        triton_poi_fused_convolution_20[grid(33554432)](buf57, primals_19,
            33554432, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_19
        buf58 = empty_strided_cuda((4, 8, 1, 1, 14), (112, 14, 448, 448, 1),
            torch.float32)
        buf59 = empty_strided_cuda((4, 8, 1, 1, 14), (112, 14, 448, 448, 1),
            torch.float32)
        buf60 = empty_strided_cuda((4, 8, 1, 1, 14), (112, 14, 448, 448, 1),
            torch.float32)
        triton_red_fused_native_group_norm_21[grid(448)](buf57, buf58,
            buf59, buf60, 448, 74899, XBLOCK=1, RBLOCK=2048, num_warps=16,
            num_stages=1)
        buf61 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        buf62 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        buf64 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
        triton_per_fused_native_group_norm_22[grid(32)](buf58, buf59,
            buf60, buf61, buf62, buf64, 32, 14, XBLOCK=8, num_warps=2,
            num_stages=1)
        del buf58
        del buf59
        del buf60
        buf65 = empty_strided_cuda((4, 128, 256, 256), (8388608, 65536, 
            256, 1), torch.float32)
        triton_poi_fused_native_group_norm_relu_23[grid(33554432)](buf57,
            buf61, buf62, primals_20, primals_21, buf65, 33554432, XBLOCK=
            1024, num_warps=4, num_stages=1)
        del buf62
        del primals_21
        buf66 = extern_kernels.convolution(buf65, primals_22, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf66, (4, 64, 256, 256), (4194304, 65536, 256, 1))
        buf67 = buf66
        del buf66
        buf68 = reinterpret_tensor(buf42, (4, 4, 1, 1, 32), (128, 32, 512,
            512, 1), 0)
        del buf42
        buf69 = reinterpret_tensor(buf41, (4, 4, 1, 1, 32), (128, 32, 512,
            512, 1), 0)
        del buf41
        buf70 = reinterpret_tensor(buf40, (4, 4, 1, 1, 32), (128, 32, 512,
            512, 1), 0)
        del buf40
        triton_red_fused_convolution_native_group_norm_24[grid(512)](buf67,
            primals_23, buf68, buf69, buf70, 512, 32768, XBLOCK=1, RBLOCK=
            2048, num_warps=16, num_stages=1)
        del primals_23
        buf71 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        buf72 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf74 = reinterpret_tensor(buf72, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf72
        triton_per_fused_native_group_norm_25[grid(16)](buf74, buf68,
            buf69, buf70, buf71, 16, 32, XBLOCK=1, num_warps=2, num_stages=1)
        del buf68
        buf75 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256,
            1), torch.float32)
        triton_poi_fused_native_group_norm_relu_26[grid(16777216)](buf67,
            buf71, buf74, primals_24, primals_25, buf75, 16777216, XBLOCK=
            1024, num_warps=4, num_stages=1)
        buf76 = empty_strided_cuda((512, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_27[grid(512)](buf76, 512, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf77 = empty_strided_cuda((512, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_28[grid(512)](buf77, 512, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf78 = empty_strided_cuda((512,), (1,), torch.int64)
        triton_poi_fused__to_copy_27[grid(512)](buf78, 512, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf79 = empty_strided_cuda((512,), (1,), torch.int64)
        triton_poi_fused_add_clamp_28[grid(512)](buf79, 512, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf80 = reinterpret_tensor(buf70, (512,), (1,), 0)
        del buf70
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_29[grid(512)](buf80,
            512, XBLOCK=256, num_warps=4, num_stages=1)
        buf82 = reinterpret_tensor(buf69, (512, 1), (1, 1), 0)
        del buf69
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_29[grid(512)](buf82,
            512, XBLOCK=256, num_warps=4, num_stages=1)
        buf81 = empty_strided_cuda((4, 64, 512, 512), (16777216, 262144, 
            512, 1), torch.float32)
        buf83 = buf81
        del buf81
        triton_poi_fused__unsafe_index_add_mul_sub_30[grid(67108864)](buf83,
            buf76, buf78, buf75, buf79, buf80, buf77, buf82, 67108864,
            XBLOCK=1024, num_warps=4, num_stages=1)
        del buf75
        buf84 = extern_kernels.convolution(buf83, primals_26, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
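        # (added note) kernel _31 below fuses the final bias add with the
        # output activation: it writes conv + bias back into buf85 (which the
        # call graph also returns) and 0.5 * tanh(conv + bias) into buf86,
        # the returned depth map.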
        assert_size_stride(buf84, (4, 1, 512, 512), (262144, 262144, 512, 1))
        buf85 = buf84
        del buf84
        buf86 = empty_strided_cuda((4, 1, 512, 512), (262144, 262144, 512,
            1), torch.float32)
        triton_poi_fused_convolution_mul_tanh_31[grid(1048576)](buf85,
            primals_27, buf86, 1048576, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del primals_27
    return (buf86, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_9, primals_10, primals_12, primals_14, primals_16,
        primals_17, primals_18, primals_20, primals_22, primals_24,
        primals_25, primals_26, buf1, reinterpret_tensor(buf5, (4, 32), (
        32, 1), 0), reinterpret_tensor(buf8, (4, 32), (32, 1), 0), buf9,
        buf11, buf15, buf18, buf20, buf21, buf22, buf23, buf24, buf26,
        buf27, buf29, reinterpret_tensor(buf33, (4, 16), (16, 1), 0),
        reinterpret_tensor(buf36, (4, 16), (16, 1), 0), buf37, buf39,
        buf43, buf46, buf48, buf49, buf50, buf51, buf52, buf54, buf55,
        buf57, reinterpret_tensor(buf61, (4, 8), (8, 1), 0),
        reinterpret_tensor(buf64, (4, 8), (8, 1), 0), buf65, buf67, buf71,
        buf74, buf76, buf77, buf78, buf79, buf80, buf82, buf83, buf85)


class decoderDepthNew(nn.Module):

    def __init__(self):
        super(decoderDepthNew, self).__init__()
        self.dconv0 = nn.Conv2d(in_channels=512, out_channels=512,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn0 = nn.GroupNorm(32, 512)
        self.dconv1 = nn.Conv2d(in_channels=512, out_channels=256,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn1 = nn.GroupNorm(16, 256)
        self.dconv2 = nn.Conv2d(in_channels=256, out_channels=256,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn2 = nn.GroupNorm(16, 256)
        self.dconv3 = nn.Conv2d(in_channels=256, out_channels=128,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn3 = nn.GroupNorm(8, 128)
        self.dconv4 = nn.Conv2d(in_channels=128, out_channels=128,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn4 = nn.GroupNorm(8, 128)
        self.dconv5 = nn.Conv2d(in_channels=128, out_channels=64,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn5 = nn.GroupNorm(4, 64)
        self.convFinal = nn.Conv2d(in_channels=64, out_channels=1,
            kernel_size=3, stride=1, padding=1, bias=True)

    def forward(self, input_0):
        primals_1 = self.dconv0.weight
        primals_2 = self.dconv0.bias
        primals_4 = self.dgn0.weight
        primals_5 = self.dgn0.bias
        primals_6 = self.dconv1.weight
        primals_7 = self.dconv1.bias
        primals_8 = self.dgn1.weight
        primals_9 = self.dgn1.bias
        primals_10 = self.dconv2.weight
        primals_11 = self.dconv2.bias
        primals_12 = self.dgn2.weight
        primals_13 = self.dgn2.bias
        primals_14 = self.dconv3.weight
        primals_15 = self.dconv3.bias
        primals_16 = self.dgn3.weight
        primals_17 = self.dgn3.bias
        primals_18 = self.dconv4.weight
        primals_19 = self.dconv4.bias
        primals_20 = self.dgn4.weight
        primals_21 = self.dgn4.bias
        primals_22 = self.dconv5.weight
        primals_23 = self.dconv5.bias
        primals_24 = self.dgn5.weight
        primals_25 = self.dgn5.bias
        primals_26 = self.convFinal.weight
        primals_27 = self.convFinal.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27])
        return output[0]
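

# Minimal smoke test, added as an illustrative sketch (not part of the
# original repo). It assumes a CUDA device and the imports/kernels defined
# above; the call graph asserts a (4, 512, 64, 64) float32 input and returns
# a (4, 1, 512, 512) map equal to 0.5 * tanh of the final convolution.
if __name__ == '__main__':
    model = decoderDepthNew().cuda()
    x = torch.rand(4, 512, 64, 64, device='cuda')
    out = model(x)
    assert out.shape == (4, 1, 512, 512)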
Miles629/TransparentShapeRealData
decoderDepth
false
14,275
[ "MIT" ]
91
b81098a2d1882f5fd33fba6167d7258dbe02d6d2
https://github.com/Miles629/TransparentShapeRealData/tree/b81098a2d1882f5fd33fba6167d7258dbe02d6d2