entry_point: string (length 1–65)
original_triton_python_code: string (length 208–619k)
optimised_triton_code: string (length 1.15k–275k)
repo_name: string (length 7–115)
module_name: string (length 1–65)
synthetic: bool (1 class)
uuid: int64 (0–18.5k)
licenses: list (length 1–6)
stars: int64 (0–19.8k)
sha: string (length 40)
repo_link: string (length 72–180)
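A minimal sketch of how rows with this schema could be inspected, assuming the dump corresponds to a Hugging Face-style dataset; the repository path passed to load_dataset is a placeholder and is not named anywhere in this dump.

from datasets import load_dataset

# Hypothetical dataset path; substitute the real repository id.
ds = load_dataset("username/pytorch-to-triton-modules", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
# The two code columns hold complete Python sources as single strings.
print(row["original_triton_python_code"][:200])
print(row["optimised_triton_code"][:200])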
AvgPool
import torch import torch.nn.functional as F from torch import nn import torch.utils.data class AvgPool(nn.Module): """1-d average pooling module.""" def __init__(self, stride=None, padding=0): super(AvgPool, self).__init__() self.stride = stride self.padding = padding def forward(self, x): kernel_size = x.size(2) x = F.max_pool1d(input=x, kernel_size=kernel_size, stride=self. stride, padding=self.padding) return x.squeeze(dim=-1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class AvgPoolNew(nn.Module): """1-d average pooling module.""" def __init__(self, stride=None, padding=0): super(AvgPoolNew, self).__init__() self.stride = stride self.padding = padding def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
FengZiYjun/fastNLP
AvgPool
false
5,149
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
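Each record, like the AvgPool row above, pairs a reference PyTorch module (with its get_inputs/get_init_inputs helpers) with an Inductor-generated Triton version that exposes a *New module. A rough equivalence check, assuming both code strings have already been exec()'d into dictionaries named orig_ns and opt_ns (hypothetical names) and that a CUDA device is available, might look like this:

import torch

# Namespaces produced by exec()'ing the two code columns of one row.
init_args, init_kwargs = orig_ns["get_init_inputs"]()
inputs = [t.cuda() for t in orig_ns["get_inputs"]()]

ref = orig_ns["AvgPool"](*init_args, **init_kwargs).cuda()
opt = opt_ns["AvgPoolNew"](*init_args, **init_kwargs).cuda()

# The optimised module should reproduce the reference output on the sample inputs.
torch.testing.assert_close(ref(*inputs), opt(*inputs))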
InnerProductNetwork
import torch import torch.utils.data class InnerProductNetwork(torch.nn.Module): def forward(self, x): """ :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)`` """ num_fields = x.shape[1] row, col = list(), list() for i in range(num_fields - 1): for j in range(i + 1, num_fields): row.append(i), col.append(j) return torch.sum(x[:, row] * x[:, col], dim=2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (x0 + 16 * tmp16 + 64 * x2), xmask) tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (x0 + 16 * tmp22 + 64 * x2), xmask) tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (4 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp26 = tl.load(in_ptr0 + (4 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (8 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp30 = tl.load(in_ptr0 + (8 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (12 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + x3, tmp36, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class InnerProductNetworkNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Fanxingye/Autotabular
InnerProductNetwork
false
5,150
[ "Apache-2.0" ]
1
d630c78290a52f8c73885afb16884e18135c34f6
https://github.com/Fanxingye/Autotabular/tree/d630c78290a52f8c73885afb16884e18135c34f6
RingLoss
import torch import torch.nn as nn class RingLoss(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self, weight_ring=1.0): super(RingLoss, self).__init__() self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) self.weight_ring = weight_ring def forward(self, x): l = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return l * self.weight_ring def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tmp23 = 1.0 tmp24 = tmp22 * tmp23 tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3, primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf3, buf2 class RingLossNew(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self, weight_ring=1.0): super(RingLossNew, self).__init__() self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) self.weight_ring = weight_ring def forward(self, input_0): primals_2 = self.radius primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
FEIfei-coder/circle-loss-for-reid
RingLoss
false
5,151
[ "MIT" ]
1
fbb3be087a6c390fb7f8c000eebb63aa27179a13
https://github.com/FEIfei-coder/circle-loss-for-reid/tree/fbb3be087a6c390fb7f8c000eebb63aa27179a13
LinearBlock
import torch from scipy.stats import truncnorm def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class LinearBlock(torch.nn.Module): def __init__(self, input_size, output_size): super(LinearBlock, self).__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, x): x = self.linear(x) x = self.normalize(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from scipy.stats import truncnorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 0.001 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0 , in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 0.001 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = 0.0 tmp17 = tmp15 <= tmp16 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf4 = empty_strided_cuda((1, 4, 1), (4, 1, 1), torch.float32) get_raw_stream(0) 
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf0, buf1, buf2, buf4, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1[ grid(64)](buf0, buf1, buf2, primals_4, primals_5, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del primals_5 return buf5, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf4, (4,), (1,), 0 ), buf6, reinterpret_tensor(buf1, (1, 4, 1), (4, 1, 1), 0) def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class LinearBlockNew(torch.nn.Module): def __init__(self, input_size, output_size): super(LinearBlockNew, self).__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, input_0): primals_2 = self.normalize.weight primals_4 = self.normalize.bias primals_1 = self.linear.weight primals_5 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Barchid/snn-fsl
LinearBlock
false
5,152
[ "Apache-2.0" ]
1
8adca6b7541d51b4ac4198f00e784e54589b4c9d
https://github.com/Barchid/snn-fsl/tree/8adca6b7541d51b4ac4198f00e784e54589b4c9d
LNN
import math import torch import torch.nn.functional as F import torch.utils.data class LNN(torch.nn.Module): """A pytorch implementation of LNN layer Input shape. - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``. Arguments - **in_features** : Embedding of feature. - **num_fields**: int.The field size of feature. - **LNN_dim**: int.The number of Logarithmic neuron. - **bias**: bool.Whether or not use bias in LNN. """ def __init__(self, num_fields, embed_dim, LNN_dim, bias=False): super(LNN, self).__init__() self.num_fields = num_fields self.embed_dim = embed_dim self.LNN_dim = LNN_dim self.lnn_output_dim = LNN_dim * embed_dim self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, x): """ :param x: Long tensor of size ``(batch_size, num_fields, embedding_size)`` """ embed_x_abs = torch.abs(x) embed_x_afn = torch.add(embed_x_abs, 1e-07) embed_x_log = torch.log1p(embed_x_afn) lnn_out = torch.matmul(self.weight, embed_x_log) if self.bias is not None: lnn_out += self.bias lnn_exp = torch.expm1(lnn_out) output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_fields': 4, 'embed_dim': 4, 'LNN_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.abs(tmp0) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = libdevice.log1p(tmp3) tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = libdevice.expm1(tmp0) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = 0.0 tmp5 = tmp3 <= tmp4 tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp5, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clone_expm1_relu_threshold_backward_1[grid(64, 4)]( buf1, buf2, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) return reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor( buf0, (64, 4), (4, 1), 0), buf1, buf3 class LNNNew(torch.nn.Module): """A pytorch implementation of LNN layer Input shape. - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``. Arguments - **in_features** : Embedding of feature. - **num_fields**: int.The field size of feature. - **LNN_dim**: int.The number of Logarithmic neuron. - **bias**: bool.Whether or not use bias in LNN. 
""" def __init__(self, num_fields, embed_dim, LNN_dim, bias=False): super(LNNNew, self).__init__() self.num_fields = num_fields self.embed_dim = embed_dim self.LNN_dim = LNN_dim self.lnn_output_dim = LNN_dim * embed_dim self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Fanxingye/Autotabular
LNN
false
5,153
[ "Apache-2.0" ]
1
d630c78290a52f8c73885afb16884e18135c34f6
https://github.com/Fanxingye/Autotabular/tree/d630c78290a52f8c73885afb16884e18135c34f6
FCDiscriminator
import torch import torch.nn as nn class FCDiscriminator(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.conv4(x) x = self.leaky_relu(x) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (1, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_11, (1,), (1,)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(65536)](buf5, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_3[grid(32768)](buf7, primals_9, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 2, 2), (4, 4, 2, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(16)](buf9, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7) class FCDiscriminatorNew(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminatorNew, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.classifier.weight primals_11 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
EvanfanBao/Adversarial_DA_Exp
FCDiscriminator
false
5,154
[ "MIT" ]
1
09979742d83fe6fd5de9b9f3aa6aa5fe9a44ea54
https://github.com/EvanfanBao/Adversarial_DA_Exp/tree/09979742d83fe6fd5de9b9f3aa6aa5fe9a44ea54
ConvNet
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class ConvNet(nn.Module): def __init__(self): super(ConvNet, self).__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 3)) x = x.view(-1, 192) x = self.fc(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1( in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 % 20 x5 = xindex // 400 x3 = xindex // 1200 x4 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (63 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (64 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (124 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (125 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (126 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tmp42 = tl.full([1], 0, tl.int32) tmp43 = 
triton_helpers.maximum(tmp42, tmp16) tmp44 = 0.0 tmp45 = tmp43 <= tmp44 tl.store(out_ptr0 + (x4 + 1280 * x3), tmp41, xmask) tl.store(in_out_ptr0 + (x4 + 1216 * x3), tmp43, xmask) tl.store(out_ptr1 + (x4 + 1280 * x3), tmp45, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1216 * (x0 // 1200) + x0 % 1200), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (3, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (10, 192), (192, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(46128)](buf1, primals_2, 46128, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 3, 20, 20), (1216, 400, 20, 1), torch .float32) buf3 = empty_strided_cuda((4, 3, 20, 20), (1280, 400, 20, 1), torch .int8) buf4 = buf2 del buf2 buf10 = empty_strided_cuda((4, 3, 20, 20), (1280, 400, 20, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1[grid (4800)](buf4, buf1, buf3, buf10, 4800, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((25, 192), (192, 1), torch.float32) triton_poi_fused_relu_view_2[grid(4800)](buf4, buf5, 4800, XBLOCK= 128, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((25, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf5, reinterpret_tensor(primals_4, (192, 10), (1, 192), 0), alpha=1, beta=1, out=buf6) del primals_5 buf9 = empty_strided_cuda((25, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_3[grid(25)](buf6, buf9, 25, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf6 return buf9, primals_1, primals_3, buf1, buf3, buf5, buf9, primals_4, buf10 class ConvNetNew(nn.Module): def __init__(self): super(ConvNetNew, self).__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5]) return output[0]
Fanxingye/AutoDL
ConvNet
false
5,155
[ "Apache-2.0" ]
1
6f409aefc8b81e5fe47df57b82332c8df427875d
https://github.com/Fanxingye/AutoDL/tree/6f409aefc8b81e5fe47df57b82332c8df427875d
TReLU
import torch import torch.nn.functional as F import torch.nn as nn class TReLU(nn.Module): def __init__(self): super(TReLU, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, x): x = F.relu(x - self.alpha) + self.alpha return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp5 + tmp2 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0[grid(256)](primals_2 , primals_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_1 del primals_2 return buf0, buf1 class TReLUNew(nn.Module): def __init__(self): super(TReLUNew, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, input_0): primals_1 = self.alpha primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
FightingSrain/ColorRL
TReLU
false
5,156
[ "MIT" ]
1
2576304d56c2337e2c1cb8fba93888d984ed701b
https://github.com/FightingSrain/ColorRL/tree/2576304d56c2337e2c1cb8fba93888d984ed701b
ArcBiaffine
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class ArcBiaffine(nn.Module): """helper module for Biaffine Dependency Parser predicting arc """ def __init__(self, hidden_size, bias=True): super(ArcBiaffine, self).__init__() self.U = nn.Parameter(torch.Tensor(hidden_size, hidden_size), requires_grad=True) self.has_bias = bias if self.has_bias: self.bias = nn.Parameter(torch.Tensor(hidden_size), requires_grad=True) else: self.register_parameter('bias', None) initial_parameter(self) def forward(self, head, dep): """ :param head arc-head tensor = [batch, length, emb_dim] :param dep arc-dependent tensor = [batch, length, emb_dim] :return output tensor = [bacth, length, length] """ output = dep.matmul(self.U) output = output.bmm(head.transpose(-1, -2)) if self.has_bias: output += head.matmul(self.bias).unsqueeze(1) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mv_0[grid(16)](primals_3, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = buf1 del buf1 triton_poi_fused_add_1[grid(64)](buf3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf3, primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0) def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. 
:param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class ArcBiaffineNew(nn.Module): """helper module for Biaffine Dependency Parser predicting arc """ def __init__(self, hidden_size, bias=True): super(ArcBiaffineNew, self).__init__() self.U = nn.Parameter(torch.Tensor(hidden_size, hidden_size), requires_grad=True) self.has_bias = bias if self.has_bias: self.bias = nn.Parameter(torch.Tensor(hidden_size), requires_grad=True) else: self.register_parameter('bias', None) initial_parameter(self) def forward(self, input_0, input_1): primals_2 = self.U primals_4 = self.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
FengZiYjun/fastNLP
ArcBiaffine
false
5,157
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
MaxPool
import torch import torch.nn.functional as F from torch import nn import torch.utils.data class MaxPool(nn.Module): """1-d max-pooling module.""" def __init__(self, stride=None, padding=0, dilation=1): super(MaxPool, self).__init__() self.stride = stride self.padding = padding self.dilation = dilation def forward(self, x): x = torch.transpose(x, 1, 2) kernel_size = x.size(2) x = F.max_pool1d(input=x, kernel_size=kernel_size, stride=self. stride, padding=self.padding, dilation=self.dilation) return x.squeeze(dim=-1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class MaxPoolNew(nn.Module): """1-d max-pooling module.""" def __init__(self, stride=None, padding=0, dilation=1): super(MaxPoolNew, self).__init__() self.stride = stride self.padding = padding self.dilation = dilation def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
FengZiYjun/fastNLP
MaxPool
false
5,158
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
Conv
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class Conv(nn.Module): """ Basic 1-d convolution module. initialize with xavier_uniform """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, activation='relu', initial_method=None): super(Conv, self).__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, groups=groups, bias=bias) activations = {'relu': nn.ReLU(), 'tanh': nn.Tanh()} if activation in activations: self.activation = activations[activation] else: raise Exception('Should choose activation function from: ' + ', '.join([x for x in activations])) initial_parameter(self, initial_method) def forward(self, x): x = torch.transpose(x, 1, 2) x = self.conv(x) x = self.activation(x) x = torch.transpose(x, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(16)](buf2, primals_3, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0 ), buf3 def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. 
:param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class ConvNew(nn.Module): """ Basic 1-d convolution module. initialize with xavier_uniform """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, activation='relu', initial_method=None): super(ConvNew, self).__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, groups=groups, bias=bias) activations = {'relu': nn.ReLU(), 'tanh': nn.Tanh()} if activation in activations: self.activation = activations[activation] else: raise Exception('Should choose activation function from: ' + ', '.join([x for x in activations])) initial_parameter(self, initial_method) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
FengZiYjun/fastNLP
Conv
false
5,159
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
DotAtte
import math
import torch
from torch import nn
import torch.utils.data


def seq_mask(seq_len, max_len):
    """Create sequence mask.

    :param seq_len: list or torch.Tensor, the lengths of sequences in a batch.
    :param max_len: int, the maximum sequence length in a batch.
    :return mask: torch.LongTensor, [batch_size, max_len]

    """
    if not isinstance(seq_len, torch.Tensor):
        seq_len = torch.LongTensor(seq_len)
    seq_len = seq_len.view(-1, 1).long()
    seq_range = torch.arange(start=0, end=max_len, dtype=torch.long,
        device=seq_len.device).view(1, -1)
    return torch.gt(seq_len, seq_range)


class DotAtte(nn.Module):

    def __init__(self, key_size, value_size):
        super(DotAtte, self).__init__()
        self.key_size = key_size
        self.value_size = value_size
        self.scale = math.sqrt(key_size)

    def forward(self, Q, K, V, seq_mask=None):
        """

        :param Q: [batch, seq_len, key_size]
        :param K: [batch, seq_len, key_size]
        :param V: [batch, seq_len, value_size]
        :param seq_mask: [batch, seq_len]
        """
        output = torch.matmul(Q, K.transpose(1, 2)) / self.scale
        if seq_mask is not None:
            output.masked_fill_(seq_mask.lt(1), -float('inf'))
        output = nn.functional.softmax(output, dim=2)
        return torch.matmul(output, V)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'key_size': 4, 'value_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out =buf1) del arg1_1 buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = 
reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def seq_mask(seq_len, max_len): """Create sequence mask. :param seq_len: list or torch.Tensor, the lengths of sequences in a batch. :param max_len: int, the maximum sequence length in a batch. :return mask: torch.LongTensor, [batch_size, max_len] """ if not isinstance(seq_len, torch.Tensor): seq_len = torch.LongTensor(seq_len) seq_len = seq_len.view(-1, 1).long() seq_range = torch.arange(start=0, end=max_len, dtype=torch.long, device =seq_len.device).view(1, -1) return torch.gt(seq_len, seq_range) class DotAtteNew(nn.Module): def __init__(self, key_size, value_size): super(DotAtteNew, self).__init__() self.key_size = key_size self.value_size = value_size self.scale = math.sqrt(key_size) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
FengZiYjun/fastNLP
DotAtte
false
5,160
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
L2Norm
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init


class L2Norm(nn.Module):

    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant(self.weight, self.gamma)

    def forward(self, x):
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_channels': 4, 'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt as sqrt from itertools import product as product import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Feywell/association_lstm_implement
L2Norm
false
5,161
[ "MIT" ]
1
4e439bd934dc865aad0015a897980a8f124602af
https://github.com/Feywell/association_lstm_implement/tree/4e439bd934dc865aad0015a897980a8f124602af
LabelBilinear
import torch
from torch import nn
import torch.utils.data


class LabelBilinear(nn.Module):
    """helper module for Biaffine Dependency Parser predicting label
    """

    def __init__(self, in1_features, in2_features, num_label, bias=True):
        super(LabelBilinear, self).__init__()
        self.bilinear = nn.Bilinear(in1_features, in2_features, num_label,
            bias=bias)
        self.lin = nn.Linear(in1_features + in2_features, num_label,
            bias=False)

    def forward(self, x1, x2):
        output = self.bilinear(x1, x2)
        output += self.lin(torch.cat([x1, x2], dim=2))
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in1_features': 4, 'in2_features': 4, 'num_label': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_4, (16, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_3, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_4, primals_3, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf3) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(64)](buf4, primals_2, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf3 del primals_2 return buf4, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (16, 8), (8, 1), 0) class LabelBilinearNew(nn.Module): """helper module for Biaffine Dependency Parser predicting label """ def __init__(self, in1_features, in2_features, num_label, bias=True): super(LabelBilinearNew, self).__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.lin = nn.Linear(in1_features + in2_features, num_label, bias=False ) def forward(self, input_0, input_1): primals_1 = self.bilinear.weight primals_2 = self.bilinear.bias primals_5 = self.lin.weight primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5]) return output[0]
FengZiYjun/fastNLP
LabelBilinear
false
5,162
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
BiAffine
import torch
from torch import nn
import torch.utils.data
from torch.nn import Parameter


class BiAffine(nn.Module):

    def __init__(self, n_enc, n_dec, n_labels, biaffine=True, **kwargs):
        """

        Args:
            n_enc: int
                the dimension of the encoder input.
            n_dec: int
                the dimension of the decoder input.
            n_labels: int
                the number of labels of the crf layer
            biaffine: bool
                if apply bi-affine parameter.
            **kwargs:
        """
        super(BiAffine, self).__init__()
        self.n_enc = n_enc
        self.n_dec = n_dec
        self.num_labels = n_labels
        self.biaffine = biaffine
        self.W_d = Parameter(torch.Tensor(self.num_labels, self.n_dec))
        self.W_e = Parameter(torch.Tensor(self.num_labels, self.n_enc))
        self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
        if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.n_dec,
                self.n_enc))
        else:
            self.register_parameter('U', None)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.W_d)
        nn.init.xavier_uniform_(self.W_e)
        nn.init.constant_(self.b, 0.0)
        if self.biaffine:
            nn.init.xavier_uniform_(self.U)

    def forward(self, input_d, input_e, mask_d=None, mask_e=None):
        """

        Args:
            input_d: Tensor
                the decoder input tensor with shape = [batch, length_decoder, input_size]
            input_e: Tensor
                the child input tensor with shape = [batch, length_encoder, input_size]
            mask_d: Tensor or None
                the mask tensor for decoder with shape = [batch, length_decoder]
            mask_e: Tensor or None
                the mask tensor for encoder with shape = [batch, length_encoder]

        Returns: Tensor
            the energy tensor with shape = [batch, num_label, length, length]
        """
        assert input_d.size(0) == input_e.size(0), 'batch sizes of encoder and decoder are requires to be equal.'
        _batch, _length_decoder, _ = input_d.size()
        _, _length_encoder, _ = input_e.size()
        out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
        out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
        if self.biaffine:
            output = torch.matmul(input_d.unsqueeze(1), self.U)
            output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
            output = output + out_d + out_e + self.b
        else:
            output = out_d + out_d + self.b
        if mask_d is not None:
            output = output * mask_d.unsqueeze(1).unsqueeze(3
                ) * mask_e.unsqueeze(1).unsqueeze(2)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_enc': 4, 'n_dec': 4, 'n_labels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_5, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = 
empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = buf3 del buf3 triton_poi_fused_clone_2[grid(256)](primals_2, buf5, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_add_3[grid(256)](buf7, buf0, buf1, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_6 return buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0) class BiAffineNew(nn.Module): def __init__(self, n_enc, n_dec, n_labels, biaffine=True, **kwargs): """ Args: n_enc: int the dimension of the encoder input. n_dec: int the dimension of the decoder input. n_labels: int the number of labels of the crf layer biaffine: bool if apply bi-affine parameter. **kwargs: """ super(BiAffineNew, self).__init__() self.n_enc = n_enc self.n_dec = n_dec self.num_labels = n_labels self.biaffine = biaffine self.W_d = Parameter(torch.Tensor(self.num_labels, self.n_dec)) self.W_e = Parameter(torch.Tensor(self.num_labels, self.n_enc)) self.b = Parameter(torch.Tensor(self.num_labels, 1, 1)) if self.biaffine: self.U = Parameter(torch.Tensor(self.num_labels, self.n_dec, self.n_enc)) else: self.register_parameter('U', None) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.W_d) nn.init.xavier_uniform_(self.W_e) nn.init.constant_(self.b, 0.0) if self.biaffine: nn.init.xavier_uniform_(self.U) def forward(self, input_0, input_1): primals_3 = self.W_d primals_4 = self.W_e primals_6 = self.b primals_1 = self.U primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
FengZiYjun/fastNLP
BiAffine
false
5,163
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
FocalLoss
import torch
import torch.nn.functional as F
import torch.nn as nn


class FocalLoss(nn.Module):
    """
    from https://github.com/CellProfiling/HPA-competition-solutions/blob/master/bestfitting/src/layers/loss.py
    """

    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, logit, target):
        max_val = (-logit).clamp(min=0)
        loss = logit - logit * target + max_val + ((-max_val).exp() + (-
            logit - max_val).exp()).log()
        invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))
        loss = (invprobs * self.gamma).exp() * loss
        if len(loss.size()) == 2:
            loss = loss.sum(dim=1)
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = -tmp0 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl_math.abs(tmp7) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tl_math.exp(tmp15) tmp17 = tmp0 * tmp2 tmp18 = tmp0 - tmp17 tmp19 = triton_helpers.maximum(tmp1, tmp8) tmp20 = tmp18 + tmp19 tmp21 = -tmp19 tmp22 = tl_math.exp(tmp21) tmp23 = tmp1 - tmp19 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tl_math.log(tmp25) tmp27 = tmp20 + tmp26 tmp28 = tmp16 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp31 / tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[ grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class FocalLossNew(nn.Module): """ from https://github.com/CellProfiling/HPA-competition-solutions/blob/master/bestfitting/src/layers/loss.py """ def __init__(self, gamma=2): super().__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Fkaneko/kaggle-hpa-single-cell-image-classification
FocalLoss
false
5,164
[ "MIT" ]
1
52000cbf5c7eec6ace29274d9e85b5b24fac281b
https://github.com/Fkaneko/kaggle-hpa-single-cell-image-classification/tree/52000cbf5c7eec6ace29274d9e85b5b24fac281b
ConvNet
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class ConvNet(nn.Module):

    def __init__(self, NumChannels):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(NumChannels, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 32, 32])]


def get_init_inputs():
    return [[], {'NumChannels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = 
tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 1000 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (6, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 4, 32, 32), (4096, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (1000, 400), (400, 1)) assert_size_stride(primals_7, (1000,), (1,)) assert_size_stride(primals_8, (10, 1000), (1000, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 1000), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(4000)](buf9, primals_7, 4000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (1000, 10), (1, 1000), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, primals_8, primals_6) class ConvNetNew(nn.Module): def __init__(self, NumChannels): super(ConvNetNew, self).__init__() self.conv1 = nn.Conv2d(NumChannels, 6, 5) self.pool 
= nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 1000) self.fc2 = nn.Linear(1000, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
FedericoZocco/VarMemLBFGS-PyTorch
ConvNet
false
5,165
[ "MIT" ]
1
5a0ed7b95fc71c9a421a07071f8d5199cf6a6216
https://github.com/FedericoZocco/VarMemLBFGS-PyTorch/tree/5a0ed7b95fc71c9a421a07071f8d5199cf6a6216
BCELoss2d
import torch
import torch.nn as nn
import torch.nn.functional as F


class BCELoss2d(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2d, self).__init__()
        self.criterion = nn.BCELoss(weight, size_average)

    def forward(self, inputs, targets):
        probs = F.sigmoid(inputs)
        probs_flat = probs.view(-1)
        targets_flat = targets.view(-1)
        loss = self.criterion(probs_flat, targets_flat)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCELoss2dNew(nn.Module): def __init__(self, weight=None, size_average=True): super(BCELoss2dNew, self).__init__() self.criterion = nn.BCELoss(weight, size_average) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ForrestPi/SegDL
BCELoss2d
false
5,166
[ "MIT" ]
1
56f2ff229dfa7540704d6de50292c724693aac75
https://github.com/ForrestPi/SegDL/tree/56f2ff229dfa7540704d6de50292c724693aac75
T5LayerNorm
import torch
import torch.nn as nn
import torch.utils.checkpoint


class T5LayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        Construct a layernorm module in the T5 style
        No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        if self.weight.dtype == torch.float16:
            hidden_states = hidden_states
        return self.weight * hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp1 * tmp17 tmp19 = tmp0 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class T5LayerNormNew(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ Construct a layernorm module in the T5 style No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Elvisambition/bert_seq2seq
T5LayerNorm
false
5,167
[ "Apache-2.0" ]
1
643ac537c16872f0d13200de06001d8201a54fbb
https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb
Scale
import torch
from torch import nn


class Scale(nn.Module):

    def __init__(self, scale):
        super().__init__()
        self.scale = scale

    def forward(self, x):
        return x * self.scale


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale': 1.0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ScaleNew(nn.Module):

    def __init__(self, scale):
        super().__init__()
        self.scale = scale

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
FranardoHuang/ROAR
Scale
false
5,168
[ "Apache-2.0" ]
1
859e22389907dd0e61c83980ae5ff6dae51341d3
https://github.com/FranardoHuang/ROAR/tree/859e22389907dd0e61c83980ae5ff6dae51341d3
GlobalAttentionGeneral
import torch
import torch.nn as nn
import torch.nn.parallel


def conv1x1(in_planes, out_planes, bias=False):
    """1x1 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
        padding=0, bias=bias)


class GlobalAttentionGeneral(nn.Module):

    def __init__(self, idf, cdf):
        super(GlobalAttentionGeneral, self).__init__()
        self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)
        self.mask = None

    def applyMask(self, mask):
        self.mask = mask

    def forward(self, input, context):
        """
        input: batch x idf x ih x iw (queryL=ihxiw)
        context: batch x cdf x sourceL
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context.unsqueeze(3)
        sourceT = self.conv_context(sourceT).squeeze(3)
        attn = torch.bmm(targetT, sourceT)
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            mask = self.mask.repeat(queryL, 1)
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)
        attn = attn.view(batch_size, queryL, sourceL)
        attn = torch.transpose(attn, 1, 2).contiguous()
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'idf': 4, 'cdf': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask) tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6 def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneralNew(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneralNew, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input_0, input_1): primals_3 = self.conv_context.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
FiroshV/TTI
GlobalAttentionGeneral
false
5,169
[ "MIT" ]
1
4d5a40b0ec69a47faf5256caa6d731e95d1f7b9a
https://github.com/FiroshV/TTI/tree/4d5a40b0ec69a47faf5256caa6d731e95d1f7b9a
ArcMarginProduct_subcenter
import math
import torch
import torch.nn.functional as F
import torch.nn as nn


class ArcMarginProduct_subcenter(nn.Module):

    def __init__(self, in_features, out_features, k=3):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features * k,
            in_features))
        self.reset_parameters()
        self.k = k
        self.out_features = out_features

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, features):
        cosine_all = F.linear(F.normalize(features), F.normalize(self.weight))
        cosine_all = cosine_all.view(-1, self.out_features, self.k)
        cosine, _ = torch.max(cosine_all, dim=2)
        return cosine


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 > tmp1 tmp6 = tmp0 == tmp1 tmp7 = tmp0 != tmp0 tmp8 = tmp1 != tmp1 tmp9 = tmp7 > tmp8 tmp10 = tmp5 | tmp9 tmp11 = tmp7 & tmp8 tmp12 = tmp6 | tmp11 tmp13 = tl.full([1], 0, tl.int64) tmp14 = tl.full([1], 1, tl.int64) tmp15 = tmp13 < tmp14 tmp16 = tmp12 & tmp15 tmp17 = tmp10 | tmp16 tmp18 = tl.where(tmp17, tmp0, tmp1) tmp19 = tl.where(tmp17, tmp13, tmp14) tmp20 = tmp18 > tmp3 tmp21 = tmp18 == tmp3 tmp22 = tmp18 != tmp18 tmp23 = tmp3 != tmp3 tmp24 = tmp22 > tmp23 tmp25 = tmp20 | tmp24 tmp26 = tmp22 & tmp23 tmp27 = tmp21 | tmp26 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp19 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tmp25 | tmp30 tl.where(tmp31, tmp18, tmp3) tmp33 = tl.where(tmp31, tmp19, tmp28) tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp33, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(48)](primals_2, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 12), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.int64) triton_poi_fused_max_2[grid(256)](buf2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 1), 0) class ArcMarginProduct_subcenterNew(nn.Module): def __init__(self, in_features, out_features, k=3): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features * k, in_features)) self.reset_parameters() self.k = k self.out_features = out_features def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Fkaneko/kaggle-hpa-single-cell-image-classification
ArcMarginProduct_subcenter
false
5,170
[ "MIT" ]
1
52000cbf5c7eec6ace29274d9e85b5b24fac281b
https://github.com/Fkaneko/kaggle-hpa-single-cell-image-classification/tree/52000cbf5c7eec6ace29274d9e85b5b24fac281b
DownConv
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True,
    groups=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
        padding=padding, bias=bias, groups=groups)


class DownConv(nn.Module):
    """
    A helper Module that performs 2 convolutions and 1 MaxPool.
    A ReLU activation follows each convolution.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super(DownConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        before_pool = x
        if self.pooling:
            x = self.pool(x)
        return x, before_pool


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(64)](buf3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf4, buf3, primals_1, primals_3, primals_4, buf1, buf3, buf5 def conv3x3(in_channels, 
out_channels, stride=1, padding=1, bias=True, groups=1 ): return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias, groups=groups) class DownConvNew(nn.Module): """ A helper Module that performs 2 convolutions and 1 MaxPool. A ReLU activation follows each convolution. """ def __init__(self, in_channels, out_channels, pooling=True): super(DownConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.pooling = pooling self.conv1 = conv3x3(self.in_channels, self.out_channels) self.conv2 = conv3x3(self.out_channels, self.out_channels) if self.pooling: self.pool = nn.MaxPool2d(kernel_size=2, stride=2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
ForrestPi/SegDL
DownConv
false
5,171
[ "MIT" ]
1
56f2ff229dfa7540704d6de50292c724693aac75
https://github.com/ForrestPi/SegDL/tree/56f2ff229dfa7540704d6de50292c724693aac75
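A minimal usage sketch for the DownConv record above, assuming conv3x3 and DownConv are defined exactly as in the record; the input shape follows get_inputs() and the constructor arguments follow get_init_inputs():

import torch

# Eager-mode module from the record (assumed in scope); runs on CPU.
block = DownConv(in_channels=4, out_channels=4, pooling=True)
x = torch.rand(4, 4, 4, 4)
# Two padded 3x3 convolutions keep the 4x4 resolution; the 2x2 max pool halves it.
pooled, before_pool = block(x)
print(pooled.shape, before_pool.shape)  # torch.Size([4, 4, 2, 2]) torch.Size([4, 4, 4, 4])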
RefineLoss
import torch
import numpy as np
import torch.nn as nn


class RefineLoss(nn.Module):

    def __init__(self, alpha=1.5, alpha1=0.5, reduction='mean'):
        super(RefineLoss, self).__init__()
        self.alpha = alpha
        self.alpha1 = alpha1
        self.reduction = reduction
        self.fx = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        self.fy = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        ngx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
        ngy = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=np.float32)
        self.fx.weight.data.copy_(torch.from_numpy(ngx))
        self.fy.weight.data.copy_(torch.from_numpy(ngy))
        for param in self.fx.parameters():
            param.requires_grad = False
        for param in self.fy.parameters():
            param.requires_grad = False

    def forward(self, grayimg, pred, mask):
        """
        grayimg: gray scale input image
        pred: predicted mask
        mask: boundary mask. can be generate from ground truth foreground
            mask by morphological transformation
        """
        gx = self.fx(grayimg)
        gy = self.fy(grayimg)
        px = self.fx(pred)
        py = self.fy(pred)
        gm = torch.sqrt(gx * gx + gy * gy + 1e-06)
        pm = torch.sqrt(px * px + py * py + 1e-06)
        gv = gx / gm, gy / gm
        pv = px / pm, py / pm
        Lcos = (1 - torch.abs(gv[0] * pv[0] + gv[1] * pv[1])) * pm
        Lmag = torch.clamp_min(self.alpha * gm - pm, 0)
        Lrefine = (self.alpha1 * Lcos + (1 - self.alpha1) * Lmag) * mask
        if self.reduction == 'mean':
            Lrefine = Lrefine.mean()
        elif self.reduction == 'sum':
            Lrefine = Lrefine.sum()
        return Lrefine


def get_inputs():
    return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64]), torch.
        rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp2 = tl.load(in_ptr0 + x0, None) tmp9 = tl.load(in_ptr1 + x0, None) tmp11 = tl.load(in_ptr2 + x0, None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp5 = 1e-06 tmp6 = tmp4 + tmp5 tmp7 = libdevice.sqrt(tmp6) tmp8 = tmp0 / tmp7 tmp10 = tmp9 * tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp13 + tmp5 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp9 / tmp15 tmp17 = tmp8 * tmp16 tmp18 = tmp2 / tmp7 tmp19 = tmp11 / tmp15 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl_math.abs(tmp21) tmp23 = 1.0 tmp24 = tmp23 - tmp22 tmp25 = tmp24 * tmp15 tmp26 = 0.5 tmp27 = tmp25 * tmp26 tmp28 = 1.5 tmp29 = tmp7 * tmp28 tmp30 = tmp29 - tmp15 tmp31 = 0.0 tmp32 = triton_helpers.maximum(tmp30, tmp31) tmp33 = tmp32 * tmp26 tmp34 = tmp27 + tmp33 tl.store(in_out_ptr0 + x0, tmp34, None) @triton.jit def triton_red_fused_mean_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 8 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (4096 * ((r1 + 8192 * x0) // 16384) + r1 % 4096), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = _tmp4 + tmp3 _tmp4 = tl.where(rmask & xmask, tmp5, _tmp4) tmp4 = tl.sum(_tmp4, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_per_fused_mean_mul_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 65536.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(arg3_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(arg4_1, (4, 4, 64, 64), (16384, 
4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = extern_kernels.convolution(arg1_1, arg2_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1)) del arg1_1 buf2 = extern_kernels.convolution(arg3_1, arg0_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) del arg0_1 buf3 = extern_kernels.convolution(arg3_1, arg2_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1)) del arg2_1 del arg3_1 buf4 = reinterpret_tensor(buf0, (4, 1, 64, 64), (4096, 16384, 64, 1), 0 ) del buf0 get_raw_stream(0) triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0[grid(16384) ](buf4, buf1, buf2, buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del buf3 buf5 = empty_strided_cuda((8,), (1,), torch.float32) triton_red_fused_mean_mul_1[grid(8)](buf4, arg4_1, buf5, 8, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del arg4_1 del buf4 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6 del buf6 triton_per_fused_mean_mul_2[grid(1)](buf7, buf5, 1, 8, XBLOCK=1, num_warps=2, num_stages=1) del buf5 return buf7, class RefineLossNew(nn.Module): def __init__(self, alpha=1.5, alpha1=0.5, reduction='mean'): super(RefineLossNew, self).__init__() self.alpha = alpha self.alpha1 = alpha1 self.reduction = reduction self.fx = nn.Conv2d(1, 1, 3, padding=1, bias=False) self.fy = nn.Conv2d(1, 1, 3, padding=1, bias=False) ngx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32) ngy = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=np.float32) self.fx.weight.data.copy_(torch.from_numpy(ngx)) self.fy.weight.data.copy_(torch.from_numpy(ngy)) for param in self.fx.parameters(): param.requires_grad = False for param in self.fy.parameters(): param.requires_grad = False def forward(self, input_0, input_1, input_2): arg0_1 = self.fx.weight arg2_1 = self.fy.weight arg1_1 = input_0 arg3_1 = input_1 arg4_1 = input_2 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
ForrestPi/SegDL
RefineLoss
false
5,172
[ "MIT" ]
1
56f2ff229dfa7540704d6de50292c724693aac75
https://github.com/ForrestPi/SegDL/tree/56f2ff229dfa7540704d6de50292c724693aac75
Downsample
import torch
import torch.nn as nn
import torch.hub


class Downsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = 0, 1, 0, 1
            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0 class DownsampleNew(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Frikallo/YAKbot
Downsample
false
5,173
[ "MIT" ]
1
bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
https://github.com/Frikallo/YAKbot/tree/bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
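When with_conv is truthy, the Downsample record above replaces stride-2 average pooling with an asymmetric zero pad (left 0, right 1, top 0, bottom 1) followed by an unpadded stride-2 3x3 convolution. A small CPU-only sketch, assuming the eager Downsample class from the record is in scope:

import torch

down = Downsample(in_channels=4, with_conv=True)
x = torch.rand(4, 4, 4, 4)  # matches get_inputs() in the record
# Pad 4x4 -> 5x5, then 3x3 conv with stride 2 and no padding -> 2x2.
y = down(x)
print(y.shape)  # torch.Size([4, 4, 2, 2])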
Upsample
import torch
import torch.nn as nn
import torch.hub


class Upsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest'
            )
        if self.with_conv:
            x = self.conv(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class UpsampleNew(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Frikallo/YAKbot
Upsample
false
5,174
[ "MIT" ]
1
bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
https://github.com/Frikallo/YAKbot/tree/bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
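A hedged equivalence check for the Upsample record above, assuming both the eager Upsample class and the Triton-backed UpsampleNew class are in scope as defined in the record, and that a CUDA device is available (the generated kernels are CUDA-only). with_conv=True is used here, which is truth-equivalent to the 4 passed by get_init_inputs():

import torch

eager = Upsample(in_channels=4, with_conv=True).cuda().eval()
fused = UpsampleNew(in_channels=4, with_conv=True).cuda().eval()
fused.load_state_dict(eager.state_dict())  # both expose conv.weight / conv.bias

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # Nearest-neighbour upsample to 8x8 plus a 3x3 convolution in both paths.
    torch.testing.assert_close(eager(x), fused(x), rtol=1e-4, atol=1e-4)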
Attention
import torch
import torch as th
from torch import nn
import torch.nn.functional as F


class Attention(nn.Module):

    def __init__(self, encoder_dim, decoder_dim, attention_dim):
        super(Attention, self).__init__()
        self.attention_dim = attention_dim
        self.W = nn.Linear(decoder_dim, attention_dim)
        self.U = nn.Linear(encoder_dim, attention_dim)
        self.A = nn.Linear(attention_dim, 1)

    def forward(self, features, hidden_state):
        u_hs = self.U(features)
        w_ah = self.W(hidden_state)
        combined_states = th.tanh(u_hs + w_ah.unsqueeze(1))
        attention_scores = self.A(combined_states)
        attention_scores = attention_scores.squeeze(2)
        alpha = F.softmax(attention_scores, dim=1)
        attention_weights = features * alpha.unsqueeze(2)
        attention_weights = attention_weights.sum(dim=1)
        return attention_weights


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x3 = xindex // 256 x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x1 = xindex // 4 % 16 x3 = xindex // 256 x5 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (16 + x1 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (32 + x1 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (48 + x1 + 64 * x3), xmask, eviction_policy= 
'evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp0 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp0 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp0 * tmp9 tmp11 = tmp8 + tmp10 tl.store(out_ptr0 + x5, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1, primals_5, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf0 triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_3[grid(1024)](primals_3, buf6, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf6 return buf7, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), buf2, buf4, primals_7 class AttentionNew(nn.Module): def __init__(self, encoder_dim, decoder_dim, attention_dim): super(AttentionNew, self).__init__() self.attention_dim = attention_dim self.W = nn.Linear(decoder_dim, attention_dim) self.U = nn.Linear(encoder_dim, attention_dim) self.A = nn.Linear(attention_dim, 1) def forward(self, input_0, input_1): primals_1 = self.W.weight primals_2 = self.W.bias primals_4 = self.U.weight primals_5 = self.U.bias primals_7 = self.A.weight primals_8 = self.A.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
FranardoHuang/ROAR
Attention
false
5,175
[ "Apache-2.0" ]
1
859e22389907dd0e61c83980ae5ff6dae51341d3
https://github.com/FranardoHuang/ROAR/tree/859e22389907dd0e61c83980ae5ff6dae51341d3
DeterministicCriticNet
import torch import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class DeterministicCriticNet(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, gpu=False, batch_norm=False, non_linear=F.relu, hidden_size=64): super(DeterministicCriticNet, self).__init__() self.layer1 = nn.Linear(state_dim, hidden_size) self.layer2 = nn.Linear(hidden_size + action_dim, hidden_size) self.layer3 = nn.Linear(hidden_size, 1) self.non_linear = non_linear if batch_norm: self.bn1 = nn.BatchNorm1d(hidden_size) self.bn2 = nn.BatchNorm1d(hidden_size) self.batch_norm = batch_norm BasicNet.__init__(self, None, gpu, False) self.init_weights() def init_weights(self): bound = 0.003 self.layer3.weight.data.uniform_(-bound, bound) self.layer3.bias.data.fill_(0) def fanin(size): v = 1.0 / np.sqrt(size[1]) return torch.FloatTensor(size).uniform_(-v, v) self.layer1.weight.data = fanin(self.layer1.weight.data.size()) self.layer1.bias.data.fill_(0) self.layer2.weight.data = fanin(self.layer2.weight.data.size()) self.layer2.bias.data.fill_(0) def forward(self, x, action): x = self.to_torch_variable(x) action = self.to_torch_variable(action) x = self.non_linear(self.layer1(x)) if self.batch_norm: x = self.bn1(x) x = self.non_linear(self.layer2(torch.cat([x, action], dim=1))) if self.batch_norm: x = self.bn2(x) x = self.layer3(x) return x def predict(self, x, action): return self.forward(x, action) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 68 x1 = xindex // 68 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 68, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-64 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (64, 68), (68, 1)) assert_size_stride(primals_6, (64,), (1,)) assert_size_stride(primals_7, (1, 64), (64, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(272)](buf0, primals_3, primals_1, buf1, 272, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32) 
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (68, 64), (1, 68), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(256)](buf3, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 64), (64, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf0, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 return buf5, primals_4, buf1, buf3, primals_7, primals_5, buf6 class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class DeterministicCriticNetNew(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, gpu=False, batch_norm=False, non_linear=F.relu, hidden_size=64): super(DeterministicCriticNetNew, self).__init__() self.layer1 = nn.Linear(state_dim, hidden_size) self.layer2 = nn.Linear(hidden_size + action_dim, hidden_size) self.layer3 = nn.Linear(hidden_size, 1) self.non_linear = non_linear if batch_norm: self.bn1 = nn.BatchNorm1d(hidden_size) self.bn2 = nn.BatchNorm1d(hidden_size) self.batch_norm = batch_norm BasicNet.__init__(self, None, gpu, False) self.init_weights() def init_weights(self): bound = 0.003 self.layer3.weight.data.uniform_(-bound, bound) self.layer3.bias.data.fill_(0) def fanin(size): v = 1.0 / np.sqrt(size[1]) return torch.FloatTensor(size).uniform_(-v, v) self.layer1.weight.data = fanin(self.layer1.weight.data.size()) self.layer1.bias.data.fill_(0) self.layer2.weight.data = fanin(self.layer2.weight.data.size()) self.layer2.bias.data.fill_(0) def predict(self, x, action): return self.forward(x, action) def forward(self, input_0, input_1): primals_2 = self.layer1.weight primals_3 = self.layer1.bias primals_5 = self.layer2.weight primals_6 = self.layer2.bias primals_7 = self.layer3.weight primals_8 = self.layer3.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
G-Flor/deeprl
DeterministicCriticNet
false
5,176
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
MTFullyConnected
import time import torch import numpy as np from torch import nn from torch import optim from torch.nn import functional as F class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. """ def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) score = torch.cat(score, dim=0).numpy() return score class MTFullyConnected(Base): """Multi-task DNN classification/regression model. It contains four fully connected layers between which are dropout layer for robustness. Arguments: n_dim (int): the No. of columns (features) for input tensor n_task (int): the No. of columns (tasks) for output tensor. 
is_reg (bool, optional): Regression model (True) or Classification model (False) """ def __init__(self, n_dim, n_task, is_reg=False): super(MTFullyConnected, self).__init__() self.n_task = n_task self.dropout = nn.Dropout(0.25) self.fc0 = nn.Linear(n_dim, 8000) self.fc1 = nn.Linear(8000, 4000) self.fc2 = nn.Linear(4000, 2000) self.output = nn.Linear(2000, n_task) self.is_reg = is_reg if is_reg: self.criterion = nn.MSELoss() else: self.criterion = nn.BCELoss() self.activation = nn.Sigmoid() self def forward(self, X, istrain=False): """Invoke the class directly as a function Arguments: X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features. istrain (bool, optional): is it invoked during training process (True) or just for prediction (False) Return: y (FloatTensor): m X l FloatTensor, m is the No. of samples, n is the No. of tasks """ y = F.relu(self.fc0(X)) if istrain: y = self.dropout(y) y = F.relu(self.fc1(y)) if istrain: y = self.dropout(y) y = F.relu(self.fc2(y)) if istrain: y = self.dropout(y) if self.is_reg: y = self.output(y) else: y = self.activation(self.output(y)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_dim': 4, 'n_task': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import time import numpy as np from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 8000 x1 = xindex // 8000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 8064 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4000 x1 = xindex // 4000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = xindex // 2000 tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask) tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8000, 4), (4, 1)) assert_size_stride(primals_2, (8000,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4000, 8000), (8000, 1)) assert_size_stride(primals_5, (4000,), (1,)) assert_size_stride(primals_6, (2000, 4000), (4000, 1)) assert_size_stride(primals_7, (2000,), (1,)) assert_size_stride(primals_8, (4, 2000), (2000, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1 ), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512000)](buf1, primals_2, buf10, 512000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0) del buf2 buf9 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3, primals_5, buf9, 256000, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(128000)](buf5, primals_7, buf8, 128000, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0 ), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), out=buf6 ) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_sigmoid_3[grid(256)](buf7, primals_9, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0 ), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10 class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. 
""" def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) score = torch.cat(score, dim=0).numpy() return score class MTFullyConnectedNew(Base): """Multi-task DNN classification/regression model. It contains four fully connected layers between which are dropout layer for robustness. Arguments: n_dim (int): the No. of columns (features) for input tensor n_task (int): the No. of columns (tasks) for output tensor. 
is_reg (bool, optional): Regression model (True) or Classification model (False) """ def __init__(self, n_dim, n_task, is_reg=False): super(MTFullyConnectedNew, self).__init__() self.n_task = n_task self.dropout = nn.Dropout(0.25) self.fc0 = nn.Linear(n_dim, 8000) self.fc1 = nn.Linear(8000, 4000) self.fc2 = nn.Linear(4000, 2000) self.output = nn.Linear(2000, n_task) self.is_reg = is_reg if is_reg: self.criterion = nn.MSELoss() else: self.criterion = nn.BCELoss() self.activation = nn.Sigmoid() self def forward(self, input_0): primals_1 = self.fc0.weight primals_2 = self.fc0.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_8 = self.output.weight primals_9 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
EXYNOS-999/DrugEx
MTFullyConnected
false
5,177
[ "MIT" ]
1
f75a90fbc0b9863d594fbff6afecb0f866c076d6
https://github.com/EXYNOS-999/DrugEx/tree/f75a90fbc0b9863d594fbff6afecb0f866c076d6
CRFLayer
import torch import torch.nn.functional as F import torch.nn as nn import torch.utils.checkpoint class CRFLayer(nn.Module): """ """ def __init__(self, output_dim): super(CRFLayer, self).__init__() self.output_dim = output_dim self.trans = nn.Parameter(torch.Tensor(output_dim, output_dim)) self.trans.data.uniform_(-0.1, 0.1) def compute_loss(self, y_pred, y_true, mask): """ 计算CRF损失 """ y_pred = y_pred * mask y_true = y_true * mask target_score = self.target_score(y_pred, y_true) log_norm = self.log_norm_step(y_pred, mask) log_norm = self.logsumexp(log_norm, dim=1) return log_norm - target_score def forward(self, y_pred, y_true, mask): """ y_true: [[1, 2, 3], [2, 3, 0] ] mask: [[1, 1, 1], [1, 1, 0]] """ if y_pred.shape[0] != mask.shape[0] or y_pred.shape[1] != mask.shape[1 ]: raise Exception('mask shape is not match to y_pred shape') mask = mask.reshape((mask.shape[0], mask.shape[1], 1)) mask = mask.float() y_true = y_true.reshape(y_pred.shape[:-1]) y_true = y_true.long() y_true_onehot = F.one_hot(y_true, self.output_dim) y_true_onehot = y_true_onehot.float() return self.compute_loss(y_pred, y_true_onehot, mask) def target_score(self, y_pred, y_true): """ 计算状态标签得分 + 转移标签得分 y_true: (batch, seq_len, out_dim) y_pred: (batch, seq_len, out_dim) """ point_score = torch.einsum('bni,bni->b', y_pred, y_true) trans_score = torch.einsum('bni,ij,bnj->b', y_true[:, :-1], self. trans, y_true[:, 1:]) return point_score + trans_score def log_norm_step(self, y_pred, mask): """ 计算归一化因子Z(X) """ state = y_pred[:, 0] y_pred = y_pred[:, 1:].contiguous() mask = mask[:, 1:].contiguous() _batch, seq_len, _out_dim = y_pred.shape for t in range(seq_len): cur_mask = mask[:, t] state = torch.unsqueeze(state, 2) g = torch.unsqueeze(self.trans, 0) outputs = self.logsumexp(state + g, dim=1) outputs = outputs + y_pred[:, t] outputs = cur_mask * outputs + (1 - cur_mask) * state.squeeze(-1) state = outputs return outputs def logsumexp(self, x, dim=None, keepdim=False): """ 避免溢出 """ if dim is None: x, dim = x.view(-1), 0 xm, _ = torch.max(x, dim, keepdim=True) out = xm + torch.log(torch.sum(torch.exp(x - xm), dim=dim, keepdim= True)) return out if keepdim else out.squeeze(dim) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 1])] def get_init_inputs(): return [[], {'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3.to(tl.int64) tmp5 = y0 tmp6 = tmp4 == tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp9, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 3 x2 = xindex // 12 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr1 + (x1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.int64) tmp2 = x0 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x2 = xindex // 12 x1 = xindex // 3 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (1 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (1 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = x1 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 3 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 12 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 3 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + (4 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + (8 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr0 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = 
tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + (12 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr0 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_sub_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_out_ptr0 + x0, xmask) tmp21 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tmp1 - tmp6 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp3 - tmp6 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp15 = tmp5 - tmp6 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 + tmp18 tmp22 = tmp20 + tmp21 tmp23 = tmp19 - tmp22 tl.store(in_out_ptr0 + x0, tmp23, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, primals_2, primals_3, buf0, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(buf1, (4, 16, 1), (16, 1, 0), 0), out=buf2) del buf0 buf3 = empty_strided_cuda((4, 3, 4, 1), (12, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(48)](primals_3, primals_2, buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((1, 12, 4), (48, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (1, 12, 4), (0, 4, 1), 0), reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(48)](primals_3, primals_2, buf5, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf6 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 3)](buf4, buf6, 16, 3, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf4 buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 12), (12, 0, 1), 0), reinterpret_tensor(buf6, (4, 12, 1), (12, 1, 0), 0), out=buf7) del buf6 buf8 = buf1 del buf1 triton_poi_fused_mul_4[grid(64)](primals_1, primals_2, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf11 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5[grid(16)](buf11, buf8, primals_4, primals_2, primals_1, 16, XBLOCK=16, num_warps =1, num_stages=1) buf13 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0) del buf13 triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6[grid(16)](buf14, buf11, primals_4, primals_2, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf11, (4, 1, 4), (4, 16, 1), 0) del buf11 buf17 = reinterpret_tensor(buf16, (4, 4), (4, 1), 0) del buf16 triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7[grid(16)](buf17, buf14, primals_4, primals_2, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf14 buf18 = reinterpret_tensor(buf2, (4,), (1,), 0) del buf2 triton_poi_fused_add_sub_8[grid(4)](buf18, buf17, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf17 del buf7 return buf18, primals_1, primals_2, primals_4, reinterpret_tensor(buf8, (4, 4, 1), (16, 1, 1), 0), reinterpret_tensor(buf5, (4, 12, 1), (12, 1, 12), 0), reinterpret_tensor(buf3, (1, 4, 12), (48, 1, 4), 0) class CRFLayerNew(nn.Module): """ """ def __init__(self, output_dim): super(CRFLayerNew, self).__init__() self.output_dim = output_dim self.trans = nn.Parameter(torch.Tensor(output_dim, output_dim)) self.trans.data.uniform_(-0.1, 0.1) def compute_loss(self, y_pred, y_true, mask): """ 计算CRF损失 """ y_pred = y_pred * mask y_true = y_true * mask target_score = self.target_score(y_pred, y_true) log_norm = self.log_norm_step(y_pred, mask) log_norm = self.logsumexp(log_norm, dim=1) return log_norm - target_score def target_score(self, y_pred, y_true): """ 计算状态标签得分 + 转移标签得分 y_true: (batch, seq_len, out_dim) y_pred: (batch, seq_len, out_dim) """ point_score = torch.einsum('bni,bni->b', y_pred, y_true) trans_score = torch.einsum('bni,ij,bnj->b', y_true[:, :-1], self. 
trans, y_true[:, 1:]) return point_score + trans_score def log_norm_step(self, y_pred, mask): """ 计算归一化因子Z(X) """ state = y_pred[:, 0] y_pred = y_pred[:, 1:].contiguous() mask = mask[:, 1:].contiguous() _batch, seq_len, _out_dim = y_pred.shape for t in range(seq_len): cur_mask = mask[:, t] state = torch.unsqueeze(state, 2) g = torch.unsqueeze(self.trans, 0) outputs = self.logsumexp(state + g, dim=1) outputs = outputs + y_pred[:, t] outputs = cur_mask * outputs + (1 - cur_mask) * state.squeeze(-1) state = outputs return outputs def logsumexp(self, x, dim=None, keepdim=False): """ 避免溢出 """ if dim is None: x, dim = x.view(-1), 0 xm, _ = torch.max(x, dim, keepdim=True) out = xm + torch.log(torch.sum(torch.exp(x - xm), dim=dim, keepdim= True)) return out if keepdim else out.squeeze(dim) def forward(self, input_0, input_1, input_2): primals_3 = self.trans primals_1 = input_0 primals_4 = input_1 primals_2 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Elvisambition/bert_seq2seq
CRFLayer
false
5,178
[ "Apache-2.0" ]
1
643ac537c16872f0d13200de06001d8201a54fbb
https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb
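Side note on the CRFLayer row above (an added illustration, not a field of the dataset): its logsumexp helper, whose docstring 避免溢出 means "avoid overflow", subtracts the per-dimension maximum before exponentiating and adds it back afterwards, so the exponentials stay in range even for very large scores. A minimal sketch with arbitrary example values:

import torch

def logsumexp(x, dim=None, keepdim=False):
    # Same computation as CRFLayer.logsumexp above: shift by the max, then log-sum-exp.
    if dim is None:
        x, dim = x.view(-1), 0
    xm, _ = torch.max(x, dim, keepdim=True)
    out = xm + torch.log(torch.sum(torch.exp(x - xm), dim=dim, keepdim=True))
    return out if keepdim else out.squeeze(dim)

x = torch.tensor([[999.0, 1000.0, 1001.0]])
print(logsumexp(x, dim=1))                 # approx. tensor([1001.4076])
print(torch.logsumexp(x, dim=1))           # built-in reference, same value
print(torch.log(torch.exp(x).sum(dim=1)))  # tensor([inf]): the naive version overflows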
GaussianCriticNet
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)


class GaussianCriticNet(nn.Module, BasicNet):

    def __init__(self, state_dim, gpu=False, hidden_size=64):
        super(GaussianCriticNet, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc_value = nn.Linear(hidden_size, 1)
        BasicNet.__init__(self, None, gpu, False)

    def forward(self, x):
        x = self.to_torch_variable(x)
        phi = F.tanh(self.fc1(x))
        phi = F.tanh(self.fc2(phi))
        value = self.fc_value(phi)
        return value

    def predict(self, x):
        return self.forward(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (1, 64), (64, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_3, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class GaussianCriticNetNew(nn.Module, BasicNet): def __init__(self, state_dim, gpu=False, hidden_size=64): super(GaussianCriticNetNew, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) 
self.fc_value = nn.Linear(hidden_size, 1) BasicNet.__init__(self, None, gpu, False) def predict(self, x): return self.forward(x) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc_value.weight primals_7 = self.fc_value.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
G-Flor/deeprl
GaussianCriticNet
false
5,179
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
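Side note on the GaussianCriticNet row above (added illustration, not dataset content): GaussianCriticNetNew routes the same fc1/fc2/fc_value parameters through the generated call() instead of the eager forward, so the two modules should agree numerically. A hypothetical parity check, assuming a CUDA device is available (call() pins cuda:0) and that this row's classes and get_inputs/get_init_inputs helpers are in scope:

import torch

init_args, init_kwargs = get_init_inputs()             # ([], {'state_dim': 4}) in this row
eager = GaussianCriticNet(*init_args, **init_kwargs).cuda()
compiled = GaussianCriticNetNew(*init_args, **init_kwargs).cuda()
compiled.load_state_dict(eager.state_dict())            # both expose fc1, fc2, fc_value

x = get_inputs()[0].cuda()                               # contiguous [4, 4, 4, 4] input
with torch.no_grad():
    ref = eager(x)                                       # eager fc1 -> tanh -> fc2 -> tanh -> fc_value
    out = compiled(x)                                    # extern mm calls + fused bias/tanh Triton kernel
print(torch.allclose(ref, out, atol=1e-5))               # expected True, up to float32 rounding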
ConditionalRandomField
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) def log_sum_exp(x, dim=-1): max_value, _ = x.max(dim=dim, keepdim=True) res = torch.log(torch.sum(torch.exp(x - max_value), dim=dim, keepdim=True) ) + max_value return res.squeeze(dim) class ConditionalRandomField(nn.Module): def __init__(self, tag_size, include_start_end_trans=False, initial_method=None): """ :param tag_size: int, num of tags :param include_start_end_trans: bool, whether to include start/end tag """ super(ConditionalRandomField, self).__init__() self.include_start_end_trans = include_start_end_trans self.tag_size = tag_size self.trans_m = nn.Parameter(torch.randn(tag_size, tag_size)) if self.include_start_end_trans: self.start_scores = nn.Parameter(torch.randn(tag_size)) self.end_scores = nn.Parameter(torch.randn(tag_size)) initial_parameter(self, initial_method) def reset_parameter(self): nn.init.xavier_normal_(self.trans_m) if self.include_start_end_trans: nn.init.normal_(self.start_scores) nn.init.normal_(self.end_scores) def _normalizer_likelihood(self, logits, mask): """ Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences. :param logits:FloatTensor, max_len x batch_size x tag_size :param mask:ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, n_tags = logits.size() alpha = logits[0] if self.include_start_end_trans: alpha += self.start_scores.view(1, -1) for i in range(1, seq_len): emit_score = logits[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags) tmp = alpha.view(batch_size, n_tags, 1) + emit_score + trans_score alpha = log_sum_exp(tmp, 1) * mask[i].view(batch_size, 1 ) + alpha * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: alpha += self.end_scores.view(1, -1) return log_sum_exp(alpha, 1) def _glod_score(self, logits, tags, mask): """ Compute the score for the gold path. 
:param logits: FloatTensor, max_len x batch_size x tag_size :param tags: LongTensor, max_len x batch_size :param mask: ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, _ = logits.size() batch_idx = torch.arange(batch_size, dtype=torch.long, device= logits.device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=logits.device) trans_score = self.trans_m[tags[:seq_len - 1], tags[1:]] * mask[1:, :] emit_score = logits[seq_idx.view(-1, 1), batch_idx.view(1, -1), tags ] * mask score = trans_score + emit_score[:seq_len - 1, :] score = score.sum(0) + emit_score[-1] * mask[-1] if self.include_start_end_trans: st_scores = self.start_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[0]] last_idx = mask.long().sum(0) - 1 ed_scores = self.end_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[last_idx, batch_idx]] score += st_scores + ed_scores return score def forward(self, feats, tags, mask): """ Calculate the neg log likelihood :param feats:FloatTensor, batch_size x max_len x tag_size :param tags:LongTensor, batch_size x max_len :param mask:ByteTensor batch_size x max_len :return:FloatTensor, batch_size """ feats = feats.transpose(0, 1) tags = tags.transpose(0, 1).long() mask = mask.transpose(0, 1).float() all_path_score = self._normalizer_likelihood(feats, mask) gold_path_score = self._glod_score(feats, tags, mask) return all_path_score - gold_path_score def viterbi_decode(self, data, mask, get_score=False): """ Given a feats matrix, return best decode path and best score. :param data:FloatTensor, batch_size x max_len x tag_size :param mask:ByteTensor batch_size x max_len :param get_score: bool, whether to output the decode score. :return: scores, paths """ batch_size, seq_len, n_tags = data.size() data = data.transpose(0, 1).data mask = mask.transpose(0, 1).data.float() vpath = data.new_zeros((seq_len, batch_size, n_tags), dtype=torch.long) vscore = data[0] if self.include_start_end_trans: vscore += self.start_scores.view(1, -1) for i in range(1, seq_len): prev_score = vscore.view(batch_size, n_tags, 1) cur_score = data[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags).data score = prev_score + trans_score + cur_score best_score, best_dst = score.max(1) vpath[i] = best_dst vscore = best_score * mask[i].view(batch_size, 1) + vscore * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: vscore += self.end_scores.view(1, -1) batch_idx = torch.arange(batch_size, dtype=torch.long, device=data. device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=data.device) lens = mask.long().sum(0) - 1 idxes = (lens.view(1, -1) - seq_idx.view(-1, 1)) % seq_len ans = data.new_empty((seq_len, batch_size), dtype=torch.long) ans_score, last_tags = vscore.max(1) ans[idxes[0], batch_idx] = last_tags for i in range(seq_len - 1): last_tags = vpath[idxes[i], batch_idx, last_tags] ans[idxes[i + 1], batch_idx] = last_tags if get_score: return ans_score, ans.transpose(0, 1) return ans.transpose(0, 1) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'tag_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_exp_max_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x2 = xindex // 16 x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr3 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp4 tmp9 = tmp6 * tmp8 tmp10 = tmp5 + tmp9 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) 
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + x2, xmask) tmp24 = tl.load(in_out_ptr0 + x2, xmask) tmp26 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr3 + (x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tmp1 - tmp6 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp3 - tmp6 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp15 = tmp5 - tmp6 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp18 + tmp6 tmp21 = tmp19 * tmp20 tmp23 = tl_math.log(tmp22) tmp25 = tmp23 + tmp24 tmp27 = tmp25 * tmp26 tmp29 = 1.0 tmp30 = tmp29 - tmp26 tmp31 = tmp28 * tmp30 tmp32 = tmp27 + tmp31 tmp33 = tmp29 - tmp20 tmp34 = tmp32 * tmp33 tmp35 = tmp21 + tmp34 tl.store(in_out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_add_exp_max_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__to_copy_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + (x1 + 4 * y0), tmp1, xmask & ymask) @triton.jit def triton_poi_fused_slice_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 3 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 + x1 + 4 * y0), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__to_copy_slice_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 3 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_exp_index_max_mul_sub_sum_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr4 + x0, xmask) tmp20 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr4 + (4 + x0), xmask) tmp45 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr1 + (2 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp56 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp58 = tl.load(in_ptr4 + (8 + x0), xmask) tmp67 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last') tmp69 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last') tmp72 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last') tmp77 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp79 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp82 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp88 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp91 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp95 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp97 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp100 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp117 = tl.load(in_ptr4 + (12 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp7 = tmp6 + tmp1 tmp8 = tmp6 < 0 tmp9 = tl.where(tmp8, tmp7, tmp6) tl.device_assert((0 <= tmp9) & (tmp9 < 4) | ~xmask, 'index out of bounds: 0 <= tmp9 < 4') tmp11 = tl.load(in_ptr2 + (tmp9 + 4 * tmp4), xmask, 
eviction_policy= 'evict_last') tmp13 = tmp11 * tmp12 tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tl.device_assert((0 <= tmp17) & (tmp17 < 4) | ~xmask, 'index out of bounds: 0 <= tmp17 < 4') tmp19 = tl.load(in_ptr5 + (tmp17 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tmp19 * tmp20 tmp22 = tmp13 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tl.device_assert((0 <= tmp26) & (tmp26 < 4) | ~xmask, 'index out of bounds: 0 <= tmp26 < 4') tmp29 = tmp28 + tmp1 tmp30 = tmp28 < 0 tmp31 = tl.where(tmp30, tmp29, tmp28) tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask, 'index out of bounds: 0 <= tmp31 < 4') tmp33 = tl.load(in_ptr2 + (tmp31 + 4 * tmp26), xmask, eviction_policy= 'evict_last') tmp35 = tmp33 * tmp34 tmp37 = tmp36 + tmp1 tmp38 = tmp36 < 0 tmp39 = tl.where(tmp38, tmp37, tmp36) tl.device_assert((0 <= tmp39) & (tmp39 < 4) | ~xmask, 'index out of bounds: 0 <= tmp39 < 4') tmp41 = tl.load(in_ptr5 + (4 + tmp39 + 16 * x0), xmask, eviction_policy ='evict_last') tmp42 = tmp41 * tmp12 tmp43 = tmp35 + tmp42 tmp44 = tmp22 + tmp43 tmp46 = tmp45 + tmp1 tmp47 = tmp45 < 0 tmp48 = tl.where(tmp47, tmp46, tmp45) tl.device_assert((0 <= tmp48) & (tmp48 < 4) | ~xmask, 'index out of bounds: 0 <= tmp48 < 4') tmp51 = tmp50 + tmp1 tmp52 = tmp50 < 0 tmp53 = tl.where(tmp52, tmp51, tmp50) tl.device_assert((0 <= tmp53) & (tmp53 < 4) | ~xmask, 'index out of bounds: 0 <= tmp53 < 4') tmp55 = tl.load(in_ptr2 + (tmp53 + 4 * tmp48), xmask, eviction_policy= 'evict_last') tmp57 = tmp55 * tmp56 tmp59 = tmp58 + tmp1 tmp60 = tmp58 < 0 tmp61 = tl.where(tmp60, tmp59, tmp58) tl.device_assert((0 <= tmp61) & (tmp61 < 4) | ~xmask, 'index out of bounds: 0 <= tmp61 < 4') tmp63 = tl.load(in_ptr5 + (8 + tmp61 + 16 * x0), xmask, eviction_policy ='evict_last') tmp64 = tmp63 * tmp34 tmp65 = tmp57 + tmp64 tmp66 = tmp44 + tmp65 tmp68 = tl_math.log(tmp67) tmp70 = tmp68 + tmp69 tmp71 = tmp70 * tmp56 tmp73 = 1.0 tmp74 = tmp73 - tmp56 tmp75 = tmp72 * tmp74 tmp76 = tmp71 + tmp75 tmp78 = tl_math.log(tmp77) tmp80 = tmp78 + tmp79 tmp81 = tmp80 * tmp56 tmp83 = tmp82 * tmp74 tmp84 = tmp81 + tmp83 tmp85 = triton_helpers.maximum(tmp76, tmp84) tmp87 = tl_math.log(tmp86) tmp89 = tmp87 + tmp88 tmp90 = tmp89 * tmp56 tmp92 = tmp91 * tmp74 tmp93 = tmp90 + tmp92 tmp94 = triton_helpers.maximum(tmp85, tmp93) tmp96 = tl_math.log(tmp95) tmp98 = tmp96 + tmp97 tmp99 = tmp98 * tmp56 tmp101 = tmp100 * tmp74 tmp102 = tmp99 + tmp101 tmp103 = triton_helpers.maximum(tmp94, tmp102) tmp104 = tmp76 - tmp103 tmp105 = tl_math.exp(tmp104) tmp106 = tmp84 - tmp103 tmp107 = tl_math.exp(tmp106) tmp108 = tmp105 + tmp107 tmp109 = tmp93 - tmp103 tmp110 = tl_math.exp(tmp109) tmp111 = tmp108 + tmp110 tmp112 = tmp102 - tmp103 tmp113 = tl_math.exp(tmp112) tmp114 = tmp111 + tmp113 tmp115 = tl_math.log(tmp114) tmp116 = tmp115 + tmp103 tmp118 = tmp117 + tmp1 tmp119 = tmp117 < 0 tmp120 = tl.where(tmp119, tmp118, tmp117) tl.device_assert((0 <= tmp120) & (tmp120 < 4) | ~xmask, 'index out of bounds: 0 <= tmp120 < 4') tmp122 = tl.load(in_ptr5 + (12 + tmp120 + 16 * x0), xmask, eviction_policy='evict_last') tmp123 = tmp122 * tmp56 tmp124 = tmp123 * tmp56 tmp125 = tmp66 + tmp124 tmp126 = tmp116 - tmp125 tl.store(in_out_ptr0 + x0, tmp126, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_max_sub_sum_0[grid(16)](primals_1, primals_4, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_1[grid(64)](buf1, buf0, primals_3, primals_1, primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 triton_poi_fused_add_mul_2[grid(16)](buf3, buf2, primals_3, buf1, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 buf4 = buf1 del buf1 buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_poi_fused_add_exp_max_sub_sum_3[grid(16)](buf3, primals_1, primals_4, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused__to_copy_4[grid(4, 4)](primals_2, buf8, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf10 = empty_strided_cuda((3, 4), (1, 3), torch.int64) triton_poi_fused_slice_5[grid(3, 4)](buf8, buf10, 3, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((3, 4), (1, 3), torch.int64) triton_poi_fused__to_copy_slice_6[grid(3, 4)](buf8, buf9, 3, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4,), (1,), torch.float32) buf12 = buf11 del buf11 triton_poi_fused_add_exp_index_max_mul_sub_sum_7[grid(4)](buf12, buf9, buf10, primals_4, primals_3, buf8, primals_1, buf5, buf4, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf3 del buf4 del buf5 del buf8 return buf12, primals_1, primals_3, primals_4, buf9, buf10 def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. 
:param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) def log_sum_exp(x, dim=-1): max_value, _ = x.max(dim=dim, keepdim=True) res = torch.log(torch.sum(torch.exp(x - max_value), dim=dim, keepdim=True) ) + max_value return res.squeeze(dim) class ConditionalRandomFieldNew(nn.Module): def __init__(self, tag_size, include_start_end_trans=False, initial_method=None): """ :param tag_size: int, num of tags :param include_start_end_trans: bool, whether to include start/end tag """ super(ConditionalRandomFieldNew, self).__init__() self.include_start_end_trans = include_start_end_trans self.tag_size = tag_size self.trans_m = nn.Parameter(torch.randn(tag_size, tag_size)) if self.include_start_end_trans: self.start_scores = nn.Parameter(torch.randn(tag_size)) self.end_scores = nn.Parameter(torch.randn(tag_size)) initial_parameter(self, initial_method) def reset_parameter(self): nn.init.xavier_normal_(self.trans_m) if self.include_start_end_trans: nn.init.normal_(self.start_scores) nn.init.normal_(self.end_scores) def _normalizer_likelihood(self, logits, mask): """ Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences. :param logits:FloatTensor, max_len x batch_size x tag_size :param mask:ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, n_tags = logits.size() alpha = logits[0] if self.include_start_end_trans: alpha += self.start_scores.view(1, -1) for i in range(1, seq_len): emit_score = logits[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags) tmp = alpha.view(batch_size, n_tags, 1) + emit_score + trans_score alpha = log_sum_exp(tmp, 1) * mask[i].view(batch_size, 1 ) + alpha * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: alpha += self.end_scores.view(1, -1) return log_sum_exp(alpha, 1) def _glod_score(self, logits, tags, mask): """ Compute the score for the gold path. 
:param logits: FloatTensor, max_len x batch_size x tag_size :param tags: LongTensor, max_len x batch_size :param mask: ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, _ = logits.size() batch_idx = torch.arange(batch_size, dtype=torch.long, device= logits.device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=logits.device) trans_score = self.trans_m[tags[:seq_len - 1], tags[1:]] * mask[1:, :] emit_score = logits[seq_idx.view(-1, 1), batch_idx.view(1, -1), tags ] * mask score = trans_score + emit_score[:seq_len - 1, :] score = score.sum(0) + emit_score[-1] * mask[-1] if self.include_start_end_trans: st_scores = self.start_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[0]] last_idx = mask.long().sum(0) - 1 ed_scores = self.end_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[last_idx, batch_idx]] score += st_scores + ed_scores return score def viterbi_decode(self, data, mask, get_score=False): """ Given a feats matrix, return best decode path and best score. :param data:FloatTensor, batch_size x max_len x tag_size :param mask:ByteTensor batch_size x max_len :param get_score: bool, whether to output the decode score. :return: scores, paths """ batch_size, seq_len, n_tags = data.size() data = data.transpose(0, 1).data mask = mask.transpose(0, 1).data.float() vpath = data.new_zeros((seq_len, batch_size, n_tags), dtype=torch.long) vscore = data[0] if self.include_start_end_trans: vscore += self.start_scores.view(1, -1) for i in range(1, seq_len): prev_score = vscore.view(batch_size, n_tags, 1) cur_score = data[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags).data score = prev_score + trans_score + cur_score best_score, best_dst = score.max(1) vpath[i] = best_dst vscore = best_score * mask[i].view(batch_size, 1) + vscore * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: vscore += self.end_scores.view(1, -1) batch_idx = torch.arange(batch_size, dtype=torch.long, device=data. device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=data.device) lens = mask.long().sum(0) - 1 idxes = (lens.view(1, -1) - seq_idx.view(-1, 1)) % seq_len ans = data.new_empty((seq_len, batch_size), dtype=torch.long) ans_score, last_tags = vscore.max(1) ans[idxes[0], batch_idx] = last_tags for i in range(seq_len - 1): last_tags = vpath[idxes[i], batch_idx, last_tags] ans[idxes[i + 1], batch_idx] = last_tags if get_score: return ans_score, ans.transpose(0, 1) return ans.transpose(0, 1) def forward(self, input_0, input_1, input_2): primals_2 = self.trans_m primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
FengZiYjun/fastNLP
ConditionalRandomField
false
5,180
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
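Side note on the ConditionalRandomField row above (added illustration, not dataset content): forward returns a per-sample negative log likelihood (normalizer minus gold-path score) and viterbi_decode returns the best tag sequence. A small usage sketch; the feats/tags/mask tensors are made-up placeholders following the batch_size x max_len layout described in the docstrings:

import torch

crf = ConditionalRandomField(tag_size=4)
feats = torch.randn(2, 5, 4)                  # emission scores, (batch, max_len, tag_size)
tags = torch.randint(0, 4, (2, 5))            # gold tag ids (placeholder values)
mask = torch.ones(2, 5)                       # 1 for real tokens, 0 for padding

nll = crf(feats, tags, mask)                  # shape (2,): one neg. log likelihood per sample
loss = nll.mean()                             # scalar training loss
best_paths = crf.viterbi_decode(feats, mask)  # shape (2, 5): best tag id per position
print(loss.item(), best_paths.shape)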
STFullyConnected
import time import torch import numpy as np from torch import nn from torch import optim from torch.nn import functional as F class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. """ def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) score = torch.cat(score, dim=0).numpy() return score class STFullyConnected(Base): """Single task DNN classification/regression model. It contains four fully connected layers between which are dropout layer for robustness. Arguments: n_dim (int): the No. of columns (features) for input tensor n_class (int): the No. of columns (classes) for output tensor. 
is_reg (bool, optional): Regression model (True) or Classification model (False) """ def __init__(self, n_dim, n_class, is_reg=False): super(STFullyConnected, self).__init__() self.dropout = nn.Dropout(0.25) self.fc0 = nn.Linear(n_dim, 8000) self.fc1 = nn.Linear(8000, 4000) self.fc2 = nn.Linear(4000, 2000) self.fc3 = nn.Linear(2000, n_class) self.is_reg = is_reg if is_reg: self.criterion = nn.MSELoss() elif n_class == 1: self.criterion = nn.BCELoss() self.activation = nn.Sigmoid() else: self.criterion = nn.CrossEntropyLoss() self.activation = nn.Softmax() self def forward(self, X, istrain=False): """Invoke the class directly as a function Arguments: X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features. istrain (bool, optional): is it invoked during training process (True) or just for prediction (False) Return: y (FloatTensor): m X l FloatTensor, m is the No. of samples, n is the No. of classes """ y = F.relu(self.fc0(X)) if istrain: y = self.dropout(y) y = F.relu(self.fc1(y)) if istrain: y = self.dropout(y) y = F.relu(self.fc2(y)) if istrain: y = self.dropout(y) if self.is_reg: y = self.fc3(y) else: y = self.activation(self.fc3(y)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_dim': 4, 'n_class': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import time import numpy as np from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 8000 x1 = xindex // 8000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 8064 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4000 x1 = xindex // 4000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = xindex // 2000 tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask) tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 
tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8000, 4), (4, 1)) assert_size_stride(primals_2, (8000,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4000, 8000), (8000, 1)) assert_size_stride(primals_5, (4000,), (1,)) assert_size_stride(primals_6, (2000, 4000), (4000, 1)) assert_size_stride(primals_7, (2000,), (1,)) assert_size_stride(primals_8, (4, 2000), (2000, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0) del buf0 buf11 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1 ), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512000)](buf1, primals_2, buf11, 512000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0) del buf2 buf10 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3, primals_5, buf10, 256000, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(128000)](buf5, primals_7, buf9, 128000, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_4[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0 ), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11 
class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. """ def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) score = torch.cat(score, dim=0).numpy() return score class STFullyConnectedNew(Base): """Single task DNN classification/regression model. It contains four fully connected layers between which are dropout layer for robustness. Arguments: n_dim (int): the No. of columns (features) for input tensor n_class (int): the No. of columns (classes) for output tensor. 
is_reg (bool, optional): Regression model (True) or Classification model (False) """ def __init__(self, n_dim, n_class, is_reg=False): super(STFullyConnectedNew, self).__init__() self.dropout = nn.Dropout(0.25) self.fc0 = nn.Linear(n_dim, 8000) self.fc1 = nn.Linear(8000, 4000) self.fc2 = nn.Linear(4000, 2000) self.fc3 = nn.Linear(2000, n_class) self.is_reg = is_reg if is_reg: self.criterion = nn.MSELoss() elif n_class == 1: self.criterion = nn.BCELoss() self.activation = nn.Sigmoid() else: self.criterion = nn.CrossEntropyLoss() self.activation = nn.Softmax() self def forward(self, input_0): primals_1 = self.fc0.weight primals_2 = self.fc0.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
EXYNOS-999/DrugEx
STFullyConnected
false
5,181
[ "MIT" ]
1
f75a90fbc0b9863d594fbff6afecb0f866c076d6
https://github.com/EXYNOS-999/DrugEx/tree/f75a90fbc0b9863d594fbff6afecb0f866c076d6
MLP_model
import torch import torch.nn as nn class MLP_model(nn.Module): def __init__(self, inputsize, layer1, layer2, layer3, device): super().__init__() self.fc1 = nn.Linear(inputsize, layer1) self.fc2 = nn.Linear(layer1, layer2) self.fc3 = nn.Linear(layer2, layer3) self.fc4 = nn.Linear(layer3, 1) self.device = device def forward(self, our_data): """ our_data: [batch_size,1,4000]:[256,4000] output:[256,1] """ mlp_output = nn.functional.relu(self.fc1(our_data)) mlp_output = nn.functional.relu(self.fc2(mlp_output)) mlp_output = nn.functional.relu(self.fc3(mlp_output)) forecast_y = self.fc4(mlp_output) return forecast_y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputsize': 4, 'layer1': 1, 'layer2': 1, 'layer3': 1, 'device': 0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 1), (1, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, primals_2, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 1), (1, 0), 0), primals_4, out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf3, primals_5, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 1), (1, 0), 0), primals_6, out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf5, primals_7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 1), ( 1, 0), 0), primals_8, alpha=1, beta=1, out=buf7) del primals_9 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), reinterpret_tensor( buf3, (64, 1), (1, 1), 0), reinterpret_tensor(buf5, (64, 1), (1, 1), 0 ), primals_8, buf8, primals_6, buf9, primals_4, buf10 class MLP_modelNew(nn.Module): def __init__(self, inputsize, 
layer1, layer2, layer3, device): super().__init__() self.fc1 = nn.Linear(inputsize, layer1) self.fc2 = nn.Linear(layer1, layer2) self.fc3 = nn.Linear(layer2, layer3) self.fc4 = nn.Linear(layer3, 1) self.device = device def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
GYMS-PKU/HIgh-Frequency-Predictor
MLP_model
false
5,182
[ "Apache-2.0" ]
1
aac5efa73d6e15d95d1b99d529dcf639fb8181f4
https://github.com/GYMS-PKU/HIgh-Frequency-Predictor/tree/aac5efa73d6e15d95d1b99d529dcf639fb8181f4
_MLP_B
import torch import torch.nn as nn class _MLP_B(nn.Module): """MLP that only use age gender MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_B, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, X): out = self.do1(X) out = self.fc1(out) out = self.ac1(out) out = self.do2(out) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_3, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_3 buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class _MLP_BNew(nn.Module): """MLP that only use age gender MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_BNew, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
GaelKBertrand/Meliora_DeepLearning
_MLP_B
false
5,183
[ "MIT" ]
1
5618e01066d4d0afcd7dfe074dda91af22b5857c
https://github.com/GaelKBertrand/Meliora_DeepLearning/tree/5618e01066d4d0afcd7dfe074dda91af22b5857c
GaussianActorNet
import torch import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class GaussianActorNet(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, action_scale=1.0, action_gate =None, gpu=False, unit_std=True, hidden_size=64): super(GaussianActorNet, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.action_mean = nn.Linear(hidden_size, action_dim) if unit_std: self.action_log_std = nn.Parameter(torch.zeros(1, action_dim)) else: self.action_std = nn.Linear(hidden_size, action_dim) self.unit_std = unit_std self.action_scale = action_scale self.action_gate = action_gate BasicNet.__init__(self, None, gpu, False) def forward(self, x): x = self.to_torch_variable(x) phi = F.tanh(self.fc1(x)) phi = F.tanh(self.fc2(phi)) mean = self.action_mean(phi) if self.action_gate is not None: mean = self.action_scale * self.action_gate(mean) if self.unit_std: log_std = self.action_log_std.expand_as(mean) std = log_std.exp() else: std = F.softplus(self.action_std(phi)) + 1e-05 log_std = std.log() return mean, std, log_std def predict(self, x): return self.forward(x) def log_density(self, x, mean, log_std, std): var = std.pow(2) log_density = -(x - mean).pow(2) / (2 * var + 1e-05) - 0.5 * torch.log( 2 * Variable(torch.FloatTensor([np.pi])).expand_as(x)) - log_std return log_density.sum(1) def entropy(self, std): return 0.5 * (1 + (2 * std.pow(2) * np.pi + 1e-05).log()).sum(1).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_3, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf5, reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_6, primals_4 class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = 
torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class GaussianActorNetNew(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, action_scale=1.0, action_gate =None, gpu=False, unit_std=True, hidden_size=64): super(GaussianActorNetNew, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.action_mean = nn.Linear(hidden_size, action_dim) if unit_std: self.action_log_std = nn.Parameter(torch.zeros(1, action_dim)) else: self.action_std = nn.Linear(hidden_size, action_dim) self.unit_std = unit_std self.action_scale = action_scale self.action_gate = action_gate BasicNet.__init__(self, None, gpu, False) def predict(self, x): return self.forward(x) def log_density(self, x, mean, log_std, std): var = std.pow(2) log_density = -(x - mean).pow(2) / (2 * var + 1e-05) - 0.5 * torch.log( 2 * Variable(torch.FloatTensor([np.pi])).expand_as(x)) - log_std return log_density.sum(1) def entropy(self, std): return 0.5 * (1 + (2 * std.pow(2) * np.pi + 1e-05).log()).sum(1).mean() def forward(self, input_0): primals_8 = self.action_log_std primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.action_mean.weight primals_7 = self.action_mean.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1], output[2]
G-Flor/deeprl
GaussianActorNet
false
5,184
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
_MLP_C
import torch import torch.nn as nn class _MLP_C(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_C, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, X1, X2): X = torch.cat((X1, X2), 1) out = self.do1(X) out = self.fc1(out) out = self.ac1(out) out = self.do2(out) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (2, 4), (4, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_4, buf2, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 buf4 = empty_strided_cuda((128, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return reinterpret_tensor(buf4, (4, 8, 4, 2), (64, 8, 2, 1), 0 ), reinterpret_tensor(buf0, (128, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), primals_5 class _MLP_CNew(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_CNew, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def 
forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
GaelKBertrand/Meliora_DeepLearning
_MLP_C
false
5,185
[ "MIT" ]
1
5618e01066d4d0afcd7dfe074dda91af22b5857c
https://github.com/GaelKBertrand/Meliora_DeepLearning/tree/5618e01066d4d0afcd7dfe074dda91af22b5857c
TransformerEncoderLayer
import math import torch import torch.nn.functional as F from torch import nn def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class TransformerEncoderLayer(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.dropout = nn.Dropout(p=dropout) def forward(self, tensor, mask): tensor = tensor + self.dropout(self.attention(tensor, mask=mask)) tensor = _normalize(tensor, self.norm1) tensor = tensor + self.dropout(self.ffn(tensor)) tensor = _normalize(tensor, self.norm2) tensor *= mask.unsqueeze(-1).float() return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) 
assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_4, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_8, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_6, buf5, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(64)](primals_2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](primals_1, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 buf24 = empty_strided_cuda((4, 4, 
4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18, primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, primals_2, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_2, primals_11, primals_17, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor( buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() 
self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class TransformerEncoderLayerNew(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0, input_1): primals_2 = self.attention.q_lin.weight primals_4 = self.attention.q_lin.bias primals_3 = self.attention.k_lin.weight primals_6 = self.attention.k_lin.bias primals_5 = self.attention.v_lin.weight primals_8 = self.attention.v_lin.bias primals_7 = self.attention.out_lin.weight primals_10 = self.attention.out_lin.bias primals_11 = self.norm1.weight primals_12 = self.norm1.bias primals_9 = self.ffn.lin1.weight primals_14 = self.ffn.lin1.bias primals_13 = self.ffn.lin2.weight primals_16 = self.ffn.lin2.bias primals_17 = self.norm2.weight primals_18 = self.norm2.bias primals_1 = input_0 primals_15 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
FrankVerhoef/Persona-Dialogue-Generation
TransformerEncoderLayer
false
5,186
[ "MIT" ]
1
ffd8413c2e8b6446097902dd1c496aeb24b852b4
https://github.com/FrankVerhoef/Persona-Dialogue-Generation/tree/ffd8413c2e8b6446097902dd1c496aeb24b852b4
ResidualDenseBlock
import torch import torch.nn as nn class ResidualDenseBlock(nn.Module): def __init__(self, channels=64, kernel_size=3, growth=32): super().__init__() self.conv2d_1 = self.conv2d(channels, growth, kernel_size, growth, 0) self.conv2d_2 = self.conv2d(channels, growth, kernel_size, growth, 1) self.conv2d_3 = self.conv2d(channels, growth, kernel_size, growth, 2) self.conv2d_4 = self.conv2d(channels, growth, kernel_size, growth, 3) self.conv2d_5 = self.conv2d(channels, channels, kernel_size, growth, 4) self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) @staticmethod def conv2d(in_channels, out_channels, kernel_size, growth, factor): return nn.Conv2d(in_channels=in_channels + factor * growth, out_channels=out_channels, kernel_size=kernel_size, padding= kernel_size // 2) def forward(self, input_data): x1 = self.relu(self.conv2d_1(input_data)) x2 = self.relu(self.conv2d_2(torch.cat((input_data, x1), 1))) x3 = self.relu(self.conv2d_3(torch.cat((input_data, x1, x2), 1))) x4 = self.relu(self.conv2d_4(torch.cat((input_data, x1, x2, x3), 1))) x5 = self.conv2d_5(torch.cat((input_data, x1, x2, x3, x4), 1)) return input_data + x5 * 0.2 def get_inputs(): return [torch.rand([4, 64, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 96 x0 = xindex % 4096 x2 = xindex // 393216 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 96, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-64 + x1), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp6, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp5, tmp18) tl.store(out_ptr0 + x3, tmp19, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 128 x0 = xindex % 4096 x2 = xindex // 524288 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tl.full([1], 128, tl.int64) tmp23 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp20, other=0.0) tmp24 = tl.load(in_ptr4 + (-96 + x1), tmp20, eviction_policy= 'evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tmp25 > tmp13 tmp27 = tmp25 * tmp15 tmp28 = tl.where(tmp26, tmp25, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp20, tmp28, tmp29) tmp31 = tl.where(tmp9, tmp19, tmp30) tmp32 = tl.where(tmp4, tmp5, tmp31) tl.store(out_ptr0 + x3, tmp32, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 160 x0 = xindex % 4096 x2 = xindex // 655360 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy= 
'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp23, other=0.0) tmp25 = tl.load(in_ptr4 + (-96 + x1), tmp23, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp13 tmp28 = tmp26 * tmp15 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp0 >= tmp21 tl.full([1], 160, tl.int64) tmp35 = tl.load(in_ptr5 + (x0 + 4096 * (-128 + x1) + 131072 * x2), tmp32, other=0.0) tmp36 = tl.load(in_ptr6 + (-128 + x1), tmp32, eviction_policy= 'evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tmp37 > tmp13 tmp39 = tmp37 * tmp15 tmp40 = tl.where(tmp38, tmp37, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp32, tmp40, tmp41) tmp43 = tl.where(tmp23, tmp31, tmp42) tmp44 = tl.where(tmp9, tmp19, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + x3, tmp45, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 192 x0 = xindex % 4096 x2 = xindex // 786432 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp23, other=0.0) tmp25 = tl.load(in_ptr4 + (-96 + x1), tmp23, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp13 tmp28 = tmp26 * tmp15 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp0 >= tmp21 tmp33 = tl.full([1], 160, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tl.load(in_ptr5 + (x0 + 4096 * (-128 + x1) + 131072 * x2), tmp35, other=0.0) tmp37 = tl.load(in_ptr6 + (-128 + x1), tmp35, eviction_policy= 'evict_last', other=0.0) tmp38 = tmp36 + tmp37 tmp39 = tmp38 > tmp13 tmp40 = tmp38 * tmp15 tmp41 = tl.where(tmp39, tmp38, tmp40) tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp35, tmp41, tmp42) tmp44 = tmp0 >= tmp33 tl.full([1], 192, tl.int64) tmp47 = tl.load(in_ptr7 + (x0 + 4096 * (-160 + x1) + 131072 * x2), tmp44, other=0.0) tmp48 = tl.load(in_ptr8 + (-160 + x1), tmp44, eviction_policy= 'evict_last', other=0.0) tmp49 = tmp47 + tmp48 tmp50 = tmp49 > tmp13 tmp51 = tmp49 * tmp15 tmp52 = tl.where(tmp50, tmp49, tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = 
tl.where(tmp44, tmp52, tmp53) tmp55 = tl.where(tmp35, tmp43, tmp54) tmp56 = tl.where(tmp23, tmp31, tmp55) tmp57 = tl.where(tmp9, tmp19, tmp56) tmp58 = tl.where(tmp4, tmp5, tmp57) tl.store(out_ptr0 + x3, tmp58, None) @triton.jit def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.2 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_4, (32, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 160, 3, 3), (1440, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (64, 192, 3, 3), (1728, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1572864)](primals_3, buf0, primals_2, buf1, 1572864, XBLOCK=1024, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(2097152)](primals_3, buf0, primals_2, buf2, primals_5, buf3, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = empty_strided_cuda((4, 160, 64, 64), (655360, 4096, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(2621440)](primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf5, 2621440, XBLOCK=1024, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 192, 64, 64), (786432, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(3145728)](primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 3145728, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_add_convolution_mul_4[grid(1048576)](buf9, primals_3, primals_11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf6, primals_9, buf10, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf6 del primals_9 buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf4, primals_7, buf11, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf4 del primals_7 buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf2, primals_5, buf12, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf2 del primals_5 buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf0, primals_2, buf13, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf0 del primals_2 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13) class ResidualDenseBlockNew(nn.Module): def __init__(self, channels=64, kernel_size=3, growth=32): super().__init__() self.conv2d_1 = self.conv2d(channels, growth, kernel_size, growth, 0) self.conv2d_2 = self.conv2d(channels, growth, kernel_size, growth, 1) self.conv2d_3 = self.conv2d(channels, growth, kernel_size, growth, 2) self.conv2d_4 = self.conv2d(channels, growth, kernel_size, growth, 3) self.conv2d_5 = self.conv2d(channels, channels, kernel_size, growth, 4) self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) @staticmethod def conv2d(in_channels, out_channels, kernel_size, growth, factor): return nn.Conv2d(in_channels=in_channels + factor * growth, out_channels=out_channels, kernel_size=kernel_size, padding= kernel_size // 2) def forward(self, input_0): primals_1 = self.conv2d_1.weight primals_2 = self.conv2d_1.bias primals_4 = self.conv2d_2.weight primals_5 = self.conv2d_2.bias primals_6 = self.conv2d_3.weight primals_7 = self.conv2d_3.bias primals_8 = self.conv2d_4.weight primals_9 = self.conv2d_4.bias primals_10 = self.conv2d_5.weight primals_11 = self.conv2d_5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Frognar/Super-Resolution
ResidualDenseBlock
false
5,187
[ "MIT" ]
1
406b909d71e156aa11ee589698744e3ad9abfee7
https://github.com/Frognar/Super-Resolution/tree/406b909d71e156aa11ee589698744e3ad9abfee7
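A quick smoke-test sketch for the generated ResidualDenseBlockNew above (not part of the dataset row). It assumes the record's module is in scope and a CUDA device is available, since call() pins device 0; the (4, 64, 64, 64) input shape comes from the assert_size_stride on primals_3.

import torch
block = ResidualDenseBlockNew().cuda()        # defaults: channels=64, kernel_size=3, growth=32
x = torch.rand(4, 64, 64, 64, device='cuda')  # shape asserted on primals_3 in call()
y = block(x)
assert y.shape == x.shape                     # the block ends in x + 0.2 * conv5(...), so the shape is preserved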
SentenceEmbedding
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SentenceEmbedding(BaseSelfAttention): def __init__(self, embedding_dim, hidden_dim, num_annotations): super(SentenceEmbedding, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.num_annotations = num_annotations self.initialize_layers() def initialize_layers(self): self.Ws1 = nn.Linear(self.embedding_dim, self.hidden_dim) self.Ws2 = nn.Linear(self.hidden_dim, self.num_annotations) self.init_linear(self.Ws1) self.init_linear(self.Ws2) def forward(self, word_embeddings): """ Args: word_embeddings: (batch_size, doc_maxlen, embedding_dim) Output: sentence_embedding: (batch_size, num_annotations, embedding_dim) """ hidden = F.tanh(self.Ws1(word_embeddings)) atten_weights = F.softmax(self.Ws2(hidden), dim=2) atten_weights = atten_weights.transpose(1, 2) sentence_embedding = atten_weights.bmm(word_embeddings) return sentence_embedding def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embedding_dim': 4, 'hidden_dim': 4, 'num_annotations': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), primals_3, out=buf5) del buf4 return buf5, primals_3, buf1, buf2, primals_4 class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SentenceEmbeddingNew(BaseSelfAttention): def __init__(self, embedding_dim, hidden_dim, num_annotations): super(SentenceEmbeddingNew, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.num_annotations = num_annotations self.initialize_layers() def initialize_layers(self): self.Ws1 = nn.Linear(self.embedding_dim, self.hidden_dim) self.Ws2 = nn.Linear(self.hidden_dim, self.num_annotations) self.init_linear(self.Ws1) self.init_linear(self.Ws2) def forward(self, input_0): primals_1 = self.Ws1.weight primals_2 = self.Ws1.bias primals_4 = self.Ws2.weight primals_5 = self.Ws2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Gan-Tu/ganutils
SentenceEmbedding
false
5,188
[ "MIT" ]
1
203c703cbba0345f9cfe23b03e1e3981f03e43db
https://github.com/Gan-Tu/ganutils/tree/203c703cbba0345f9cfe23b03e1e3981f03e43db
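A minimal usage sketch for the SentenceEmbedding record above (not part of the dataset row), assuming the class definition from the record is in scope; the arguments come from get_init_inputs() and the input shape from get_inputs().

import torch
emb = SentenceEmbedding(embedding_dim=4, hidden_dim=4, num_annotations=4)
words = torch.rand(4, 4, 4)            # (batch_size, doc_maxlen, embedding_dim)
sent = emb(words)
print(sent.shape)                      # torch.Size([4, 4, 4]) -> (batch_size, num_annotations, embedding_dim)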
GFunction
import torch import torch.nn.functional as F from torch import nn from torch import optim class GFunction(nn.Module): def __init__(self, obs_size, num_outputs=128): super().__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.last(x) return x def train_model(self, c_out, next_state): loss = nn.MSELoss()(c_out, self.forward(next_state)) loss.backward() self.optimizer.step() return loss.item() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'obs_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (128, 32), (32, 1)) assert_size_stride(primals_7, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3, primals_5, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 128), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6 class GFunctionNew(nn.Module): def __init__(self, obs_size, num_outputs=128): super().__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def train_model(self, c_out, next_state): loss = nn.MSELoss()(c_out, self.forward(next_state)) loss.backward() self.optimizer.step() return loss.item() def 
forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.last.weight primals_7 = self.last.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Deepest-Project/agent57_from_ngu
GFunction
false
5,189
[ "MIT" ]
1
2f596024c7538cfaa5cf63cde1b77f8a1c22d208
https://github.com/Deepest-Project/agent57_from_ngu/tree/2f596024c7538cfaa5cf63cde1b77f8a1c22d208
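A small sketch of how the GFunction record above is exercised by its harness (not part of the dataset row); nn.Linear acts on the last dimension, so only the output width changes.

import torch
g = GFunction(obs_size=4)        # num_outputs defaults to 128
obs = torch.rand(4, 4, 4, 4)     # from get_inputs()
q = g(obs)
print(q.shape)                   # torch.Size([4, 4, 4, 128])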
UpSample
import torch from torchvision.transforms import functional as F import torch.nn as nn import torch.nn.functional as F class UpSample(nn.Sequential): def __init__(self, skip_input, output_features): super().__init__() self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1) self.leakyreluA = nn.LeakyReLU(0.2) self.convB = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1) self.leakyreluB = nn.LeakyReLU(0.2) def forward(self, x, concat_with): up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size (3)], mode='bilinear', align_corners=True) return self.leakyreluB(self.convB(self.leakyreluA(self.convA(torch. cat([up_x, concat_with], dim=1))))) def get_inputs(): return [torch.rand([4, 3, 4, 4]), torch.rand([4, 1, 4, 4])] def get_init_inputs(): return [[], {'skip_input': 4, 'output_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex // 48 x7 = xindex % 48 tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = triton_helpers.minimum(tmp23, tmp2) tmp25 = tmp20 * tmp24 tmp26 = tmp16 + tmp25 tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp29 = tmp28 - tmp27 tmp30 = tmp29 * tmp24 tmp31 = tmp27 + tmp30 tmp32 = tmp26 - tmp31 tmp33 = tmp6.to(tl.float32) tmp34 = tmp5 - tmp33 tmp35 = triton_helpers.maximum(tmp34, tmp4) tmp36 = triton_helpers.minimum(tmp35, tmp2) tmp37 = tmp32 * tmp36 tmp38 = tmp31 + tmp37 tl.store(out_ptr1 + (x7 + 64 * x4), tmp38, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0) get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (192)](primals_2, buf1, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48) triton_poi_fused_cat_1[grid(64)](primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf4, primals_4, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf9 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf7, primals_6, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del primals_6 return buf9, primals_3, primals_5, buf3, buf5, buf6, buf8 class UpSampleNew(nn.Sequential): def __init__(self, skip_input, output_features): super().__init__() self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1) self.leakyreluA = nn.LeakyReLU(0.2) self.convB = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1) self.leakyreluB = nn.LeakyReLU(0.2) def forward(self, input_0, input_1): primals_3 = self.convA.weight primals_4 = self.convA.bias primals_5 = self.convB.weight primals_6 = self.convB.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
BlairLee/dataset-insights
UpSample
false
5,190
[ "Apache-2.0" ]
1
892e2ed3a2facf97cfa3a883700830d959a0c49b
https://github.com/BlairLee/dataset-insights/tree/892e2ed3a2facf97cfa3a883700830d959a0c49b
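A minimal usage sketch for the UpSample record above (not part of the dataset row): the first tensor is resized to the second tensor's spatial size, concatenated on the channel axis (3 + 1 = skip_input = 4), then passed through the two convolutions.

import torch
up = UpSample(skip_input=4, output_features=4)
x = torch.rand(4, 3, 4, 4)       # decoder features to be upsampled
skip = torch.rand(4, 1, 4, 4)    # skip connection supplying the target H x W
out = up(x, skip)
print(out.shape)                 # torch.Size([4, 4, 4, 4])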
LastLevelMaxPool
import torch import torch.utils.data from torchvision.transforms import functional as F from torch import nn import torch.nn.functional as F class LastLevelMaxPool(nn.Module): def forward(self, x): return [F.max_pool2d(x, 1, 2, 0)] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class LastLevelMaxPoolNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CV-Rookie/EmbedMask
LastLevelMaxPool
false
5,191
[ "MIT" ]
1
3b4d9fb4e0b6112dc501708184ff684dfb45f3f0
https://github.com/CV-Rookie/EmbedMask/tree/3b4d9fb4e0b6112dc501708184ff684dfb45f3f0
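A one-line check for the LastLevelMaxPool record above (not part of the dataset row): max pooling with kernel 1 and stride 2 simply subsamples every other pixel, and the module wraps the result in a list.

import torch
pool = LastLevelMaxPool()
(p,) = pool(torch.rand(4, 4, 4, 4))   # the module returns a one-element list
print(p.shape)                        # torch.Size([4, 4, 2, 2])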
SelfAttentive
import torch import torch.nn as nn from sklearn.metrics import * class SelfAttentive(nn.Module): def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2): super(SelfAttentive, self).__init__() self.drop = nn.Dropout(dropout) self.ws1 = nn.Linear(hidden_size, att_unit, bias=False) self.ws2 = nn.Linear(att_unit, att_hops, bias=False) self.tanh = nn.Tanh() self.softmax = nn.Softmax() self.attention_hops = att_hops def forward(self, rnn_out, mask=None): outp = rnn_out size = outp.size() compressed_embeddings = outp.reshape(-1, size[2]) hbar = self.tanh(self.ws1(self.drop(compressed_embeddings))) alphas = self.ws2(hbar).view(size[0], size[1], -1) alphas = torch.transpose(alphas, 1, 2).contiguous() if mask is not None: mask = mask.squeeze(2) concatenated_mask = [mask for i in range(self.attention_hops)] concatenated_mask = torch.cat(concatenated_mask, 1) penalized_alphas = alphas + concatenated_mask else: penalized_alphas = alphas alphas = self.softmax(penalized_alphas.view(-1, size[1])) alphas = alphas.view(size[0], self.attention_hops, size[1]) return torch.bmm(alphas, outp), alphas def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (200, 4), (4, 1)) assert_size_stride(primals_3, (1, 200), (200, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 200), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(3200)](buf1, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (200, 1), (1, 200), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), primals_1, out=buf5) return buf5, reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), primals_1, buf1, buf4, primals_3 class SelfAttentiveNew(nn.Module): def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2): super(SelfAttentiveNew, self).__init__() self.drop = nn.Dropout(dropout) self.ws1 = nn.Linear(hidden_size, att_unit, bias=False) self.ws2 = nn.Linear(att_unit, att_hops, bias=False) self.tanh = nn.Tanh() self.softmax = nn.Softmax() self.attention_hops = att_hops def forward(self, input_0): primals_2 = self.ws1.weight primals_3 = self.ws2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
Dio990521/LSTM_emo_classifier
SelfAttentive
false
5,192
[ "MIT" ]
1
aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
https://github.com/Dio990521/LSTM_emo_classifier/tree/aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
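A minimal usage sketch for the SelfAttentive record above (not part of the dataset row); with the default att_hops=1 the module produces one attention distribution per document.

import torch
att = SelfAttentive(hidden_size=4)       # att_hops=1, att_unit=200 by default
rnn_out = torch.rand(4, 4, 4)            # (batch, seq_len, hidden_size)
context, alphas = att(rnn_out)
print(context.shape, alphas.shape)       # torch.Size([4, 1, 4]) torch.Size([4, 1, 4])
print(alphas.sum(dim=-1))                # each attention row sums to 1 after the softmax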
SelfAttention
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SelfAttention(BaseSelfAttention): def __init__(self, hidden_dim, scoring='general'): super(SelfAttention, self).__init__() self.scoring = scoring self.hidden_dim = hidden_dim self.initialize_layers() def initialize_layers(self): if self.scoring == 'general': self.W = nn.Linear(self.hidden_dim, self.hidden_dim) self.init_linear(self.W) elif self.scoring == 'concat': self.W = nn.Linear(2 * self.hidden_dim, self.hidden_dim) self.v = nn.Linear(self.hidden_dim, 1) self.init_linear(self.W) self.init_linear(self.v) elif self.scoring == 'dot': pass else: raise RuntimeError('Unrecognized attention scoring method: %s' % self.scoring) def forward(self, hidden_outputs): scores = self.score(hidden_outputs) context = scores.bmm(hidden_outputs) return context def score(self, hidden_outputs): if self.scoring == 'dot': H = hidden_outputs.transpose(1, 2) attention_energies = hidden_outputs.bmm(H) scores = F.softmax(attention_energies, dim=2) return scores elif self.scoring == 'general': H = self.W(hidden_outputs) H = H.transpose(1, 2) attention_energies = hidden_outputs.bmm(H) scores = F.softmax(attention_energies, dim=2) return scores elif self.scoring == 'concat': H = hidden_outputs.transpose(1, 2) scores = [] batch_size, doc_maxlen, hidden_dim = hidden_outputs.shape for doc_idx in range(H.shape[-1]): h_t = hidden_outputs[:, doc_idx, :] h_t = h_t.unsqueeze(1) h_t = h_t.repeat(1, doc_maxlen, 1) H_t = torch.cat((h_t, hidden_outputs), dim=2) H_t = self.W(H_t) H_t = torch.nn.functional.tanh(H_t) H_t = self.v(H_t) H_t = H_t.view(batch_size, doc_maxlen) scores.append(H_t) scores = torch.stack(scores) scores = scores.transpose(0, 1) scores = scores / torch.sqrt(torch.Tensor([hidden_dim])) scores = F.softmax(scores, dim=2) return scores else: raise RuntimeError('Unrecognized scoring method: %s' % self.scoring ) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), ( 16, 1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 extern_kernels.bmm(buf3, primals_3, out=buf4) return buf4, primals_3, buf3 class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. 
weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SelfAttentionNew(BaseSelfAttention): def __init__(self, hidden_dim, scoring='general'): super(SelfAttentionNew, self).__init__() self.scoring = scoring self.hidden_dim = hidden_dim self.initialize_layers() def initialize_layers(self): if self.scoring == 'general': self.W = nn.Linear(self.hidden_dim, self.hidden_dim) self.init_linear(self.W) elif self.scoring == 'concat': self.W = nn.Linear(2 * self.hidden_dim, self.hidden_dim) self.v = nn.Linear(self.hidden_dim, 1) self.init_linear(self.W) self.init_linear(self.v) elif self.scoring == 'dot': pass else: raise RuntimeError('Unrecognized attention scoring method: %s' % self.scoring) def score(self, hidden_outputs): if self.scoring == 'dot': H = hidden_outputs.transpose(1, 2) attention_energies = hidden_outputs.bmm(H) scores = F.softmax(attention_energies, dim=2) return scores elif self.scoring == 'general': H = self.W(hidden_outputs) H = H.transpose(1, 2) attention_energies = hidden_outputs.bmm(H) scores = F.softmax(attention_energies, dim=2) return scores elif self.scoring == 'concat': H = hidden_outputs.transpose(1, 2) scores = [] batch_size, doc_maxlen, hidden_dim = hidden_outputs.shape for doc_idx in range(H.shape[-1]): h_t = hidden_outputs[:, doc_idx, :] h_t = h_t.unsqueeze(1) h_t = h_t.repeat(1, doc_maxlen, 1) H_t = torch.cat((h_t, hidden_outputs), dim=2) H_t = self.W(H_t) H_t = torch.nn.functional.tanh(H_t) H_t = self.v(H_t) H_t = H_t.view(batch_size, doc_maxlen) scores.append(H_t) scores = torch.stack(scores) scores = scores.transpose(0, 1) scores = scores / torch.sqrt(torch.Tensor([hidden_dim])) scores = F.softmax(scores, dim=2) return scores else: raise RuntimeError('Unrecognized scoring method: %s' % self.scoring ) def forward(self, input_0): primals_1 = self.W.weight primals_2 = self.W.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Gan-Tu/ganutils
SelfAttention
false
5,193
[ "MIT" ]
1
203c703cbba0345f9cfe23b03e1e3981f03e43db
https://github.com/Gan-Tu/ganutils/tree/203c703cbba0345f9cfe23b03e1e3981f03e43db
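A minimal usage sketch for the SelfAttention record above (not part of the dataset row), using the default 'general' scoring from get_init_inputs(); 'dot' and 'concat' are selected the same way through the scoring argument.

import torch
attn = SelfAttention(hidden_dim=4)   # scoring='general'
h = torch.rand(4, 4, 4)              # (batch, seq_len, hidden_dim)
context = attn(h)
print(context.shape)                 # torch.Size([4, 4, 4]); one attended context vector per position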
ArcMarginProduct
import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F class ArcMarginProduct(nn.Module): """ Process the latent vectors to output the cosine vector for the follow-up ArcFaceLoss computation. Args: in_features: the column dimension of the weights, which is identical to the dim of latent vectors. out_features: the row dimension of the weights, which is identical to the number of classes. """ def __init__(self, in_features, out_features): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features) ) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, features): cosine = F.linear(F.normalize(features), F.normalize(self.weight)) return cosine def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class ArcMarginProductNew(nn.Module): """ Process the latent vectors to output the cosine vector for the follow-up ArcFaceLoss computation. Args: in_features: the column dimension of the weights, which is identical to the dim of latent vectors. out_features: the row dimension of the weights, which is identical to the number of classes. 
""" def __init__(self, in_features, out_features): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features) ) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
CTPLab/IID_representation_learning
ArcMarginProduct
false
5,194
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
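A small sketch for the ArcMarginProduct record above (not part of the dataset row). The harness get_inputs() feeds a 4D tensor, but the docstring describes a 2D batch of latent vectors; for 2D input each output entry is the cosine between a normalised latent and a normalised class weight. The batch size of 8 here is an arbitrary choice for illustration.

import torch
margin = ArcMarginProduct(in_features=4, out_features=4)
latents = torch.rand(8, 4)           # (batch, in_features)
cosine = margin(latents)             # (batch, out_features)
print(cosine.shape)                  # torch.Size([8, 4]); entries lie in [-1, 1]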
RingLoss
import torch import warnings import torch.nn as nn from torchvision.transforms import * class RingLoss(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super(RingLoss, self).__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, x): loss = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import warnings import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3, primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf3, buf2 class RingLossNew(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super(RingLossNew, self).__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, input_0): primals_2 = self.radius primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
DRACOyu/deep-person-reid
RingLoss
false
5,195
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
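A tiny numeric check for the RingLoss record above (not part of the dataset row): with all-ones features the per-sample L2 norm over dim 1 is sqrt(4) = 2, and the freshly initialised radius is 1, so the loss is (2 - 1)^2 = 1.

import torch
ring = RingLoss()        # radius parameter starts at 1.0 (construction emits the deprecation warning)
x = torch.ones(2, 4)     # each row has L2 norm 2
print(ring(x))           # scalar tensor equal to 1.0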
HardAttn
import torch import torch.nn as nn from torch.nn import functional as F from torchvision.transforms import * class HardAttn(nn.Module): """Hard Attention (Sec. 3.1.II)""" def __init__(self, in_channels): super(HardAttn, self).__init__() self.fc = nn.Linear(in_channels, 4 * 2) self.init_params() def init_params(self): self.fc.weight.data.zero_() self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float)) def forward(self, x): x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1)) theta = torch.tanh(self.fc(x)) theta = theta.view(-1, 4, 2) return theta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 
1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_1[grid(32)](buf2, primals_3, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 2), (8, 2, 1), 0 ), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf3 class HardAttnNew(nn.Module): """Hard Attention (Sec. 3.1.II)""" def __init__(self, in_channels): super(HardAttnNew, self).__init__() self.fc = nn.Linear(in_channels, 4 * 2) self.init_params() def init_params(self): self.fc.weight.data.zero_() self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float)) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
DRACOyu/deep-person-reid
HardAttn
false
5,196
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
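A minimal usage sketch for the HardAttn record above (not part of the dataset row). Because init_params() zeroes the fc weight, before any training theta is just tanh of the fixed bias pattern, identical for every input.

import torch
attn = HardAttn(in_channels=4)
theta = attn(torch.rand(4, 4, 4, 4))   # global average pool -> fc -> tanh
print(theta.shape)                     # torch.Size([4, 4, 2]): four 2-parameter outputs per sample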
BertSelfAttention
import math import torch import torch.nn as nn from sklearn.metrics import * def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. """ batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class BertSelfAttention(nn.Module): """ Extracted from """ def __init__(self, hidden_size): super(BertSelfAttention, self).__init__() self.num_attention_heads = 1 self.attention_head_size = int(hidden_size / self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(hidden_size, self.all_head_size) self.key = nn.Linear(hidden_size, self.all_head_size) self.value = nn.Linear(hidden_size, self.all_head_size) self.dropout = nn.Dropout(0.2) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: if len(attention_mask.size()) < 2: attention_mask = sequence_mask(attention_mask) reverse_mask = torch.ones(attention_mask.size()) reverse_mask[attention_mask] = 0.0 attention_scores = attention_scores + reverse_mask.unsqueeze(1 ).unsqueeze(2) * -1000000000.0 else: raise NotImplementedError attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer[:, 0, :], attention_probs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf6) return reinterpret_tensor(buf6, (4, 4), (16, 1), 0 ), buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. """ batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class BertSelfAttentionNew(nn.Module): """ Extracted from """ def __init__(self, hidden_size): super(BertSelfAttentionNew, self).__init__() self.num_attention_heads = 1 self.attention_head_size = int(hidden_size / self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(hidden_size, self.all_head_size) self.key = nn.Linear(hidden_size, self.all_head_size) self.value = nn.Linear(hidden_size, self.all_head_size) self.dropout = nn.Dropout(0.2) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
Dio990521/LSTM_emo_classifier
BertSelfAttention
false
5,197
[ "MIT" ]
1
aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
https://github.com/Dio990521/LSTM_emo_classifier/tree/aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
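A minimal smoke test for the record above, offered as a hedged sketch rather than as part of the original repository: it assumes the optimised code in this record has already been executed in the current Python session, that a CUDA device is available, and that the input uses the exact (4, 4, 4) shape fixed by get_inputs() (the generated call() asserts size and stride). Variable names are illustrative.

import torch

# Instantiate the Triton-backed module and move its Linear weights to the GPU.
model = BertSelfAttentionNew(hidden_size=4).cuda().eval()
hidden_states = torch.rand(4, 4, 4, device='cuda')  # (batch, seq_len, hidden)
with torch.no_grad():
    context, attn = model(hidden_states)
print(context.shape)  # expected (4, 4): context vector of the first token per batch element
print(attn.shape)     # expected (4, 1, 4, 4): single-head attention probabilities

Note that the generated call() contains no masking or dropout ops, so this path is deterministic for a fixed input.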
AMCLoss
import torch import torch.nn as nn import torch.nn.functional as F class AMCLoss(nn.Module): def __init__(self, in_features, out_features, s=None, m=None, device='cuda' ): """ Angular Margin Contrastive Loss https://arxiv.org/pdf/2004.09805.pdf Code converted over from Tensorflow to Pytorch """ super(AMCLoss, self).__init__() self.m = 0.5 if not m else m self.s = 1.0 if not s else s self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) self.device = device def forward(self, X, labels=None): """ input shape (N, in_features) """ X = F.normalize(X, p=2, dim=1) batch_size = X.shape[0] wf = self.fc(X) half = int(batch_size / 2) _, target_hard = torch.max(F.softmax(wf, dim=1), 1) try: neighbor_bool = torch.eq(target_hard[:half], target_hard[half:]) inner = torch.sum(X[:half] * X[half:], axis=1) except: neighbor_bool = torch.eq(target_hard[:half + 1], target_hard[half:] ) inner = torch.sum(X[:half + 1] * X[half:], axis=1) geo_desic = torch.acos(torch.clamp(inner, -1e-07, 1e-07)) * self.s geo_losses = torch.where(neighbor_bool, torch.square(geo_desic), torch.square(F.relu(self.m - geo_desic))).clamp(min=1e-12) return torch.mean(geo_losses), wf def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp8 = tmp1 / tmp6 tmp9 = tmp7 > tmp8 tmp10 = tmp7 == tmp8 tmp11 = tmp7 != tmp7 tmp12 = tmp8 != tmp8 tmp13 = tmp11 > tmp12 tmp14 = tmp9 | tmp13 tmp15 = tmp11 & tmp12 tmp16 = tmp10 | tmp15 tmp17 = tl.full([1], 0, tl.int64) tmp18 = tl.full([1], 1, tl.int64) tmp19 = tmp17 < tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp14 | tmp20 tmp22 = tl.where(tmp21, tmp7, tmp8) tmp23 = tl.where(tmp21, tmp17, tmp18) tmp24 = tmp3 / tmp6 tmp25 = tmp22 > tmp24 tmp26 = tmp22 == tmp24 tmp27 = tmp22 != tmp22 tmp28 = tmp24 != tmp24 tmp29 = tmp27 > tmp28 tmp30 = tmp25 | tmp29 tmp31 = tmp27 & tmp28 tmp32 = tmp26 | tmp31 tmp33 = tl.full([1], 2, tl.int64) tmp34 = tmp23 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tmp30 | tmp35 tmp37 = tl.where(tmp36, tmp22, tmp24) tmp38 = tl.where(tmp36, tmp23, tmp33) tmp39 = tmp5 / tmp6 tmp40 = tmp37 > tmp39 tmp41 = tmp37 == tmp39 tmp42 = tmp37 != tmp37 tmp43 = tmp39 != tmp39 tmp44 = tmp42 > tmp43 tmp45 = tmp40 | tmp44 tmp46 = tmp42 & tmp43 tmp47 = tmp41 | tmp46 tmp48 = tl.full([1], 3, tl.int64) tmp49 = tmp38 < tmp48 tmp50 = tmp47 & tmp49 tmp51 = tmp45 | tmp50 tl.where(tmp51, tmp37, tmp39) tmp53 = tl.where(tmp51, tmp38, tmp48) tl.store(out_ptr0 + x2, tmp53, xmask) @triton.jit def triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (128 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (144 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (160 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr0 + (176 + r0 + 64 * r1), None) tmp22 = tl.load(in_ptr1 + r2, None) tmp23 = tl.load(in_ptr1 + (32 + r2), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -1e-07 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = 1e-07 tmp18 = triton_helpers.minimum(tmp16, tmp17) tmp19 = libdevice.acos(tmp18) tmp20 = 1.0 tmp21 = tmp19 * tmp20 tmp24 = tmp22 == tmp23 tmp25 = tmp21 * tmp21 tmp26 = 0.5 tmp27 = tmp26 - tmp21 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = tmp29 * tmp29 tmp31 = tl.where(tmp24, tmp25, tmp30) tmp32 = 1e-12 tmp33 = triton_helpers.maximum(tmp31, tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp37 = 32.0 tmp38 = tmp36 / tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused__softmax_max_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3[grid (1)](buf6, buf0, buf3, 1, 32, XBLOCK=1, num_warps=2, num_stages=1) del buf3 return buf6, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class AMCLossNew(nn.Module): def __init__(self, in_features, out_features, s=None, m=None, device='cuda' ): """ Angular Margin Contrastive Loss https://arxiv.org/pdf/2004.09805.pdf Code converted over from Tensorflow to Pytorch """ super(AMCLossNew, self).__init__() self.m = 0.5 if not m else m self.s = 1.0 if not s else s self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) self.device = device def forward(self, input_0): primals_2 = self.fc.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
GatorSense/LACE
AMCLoss
false
5,198
[ "MIT" ]
1
ee8194bc443886642f22c2317f5bdef23bba5147
https://github.com/GatorSense/LACE/tree/ee8194bc443886642f22c2317f5bdef23bba5147
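A hedged usage sketch for the AMCLoss record, assuming the optimised code above is in scope and a CUDA device is present; the traced call() only accepts the (4, 4, 4, 4) input shape from get_inputs().

import torch

# The compiled criterion returns (mean geodesic loss, raw logits wf), mirroring the eager forward.
criterion = AMCLossNew(in_features=4, out_features=4).cuda()
features = torch.rand(4, 4, 4, 4, device='cuda')
loss, logits = criterion(features)
print(loss.item())    # scalar angular-margin contrastive loss
print(logits.shape)   # expected (4, 4, 4, 4)

As in the eager module, pairs whose hard predictions agree are penalised by the squared geodesic distance acos(clamp(<x_i, x_j>)) * s, disagreeing pairs by relu(m - d)**2; the clamp range (-1e-07, 1e-07) is carried over verbatim from the source.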
AvgPoolPad
import torch import torch.nn as nn from torchvision.transforms import * class AvgPoolPad(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) 
tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * ( 0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + ( -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + x4, tmp93, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class AvgPoolPadNew(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
DRACOyu/deep-person-reid
AvgPoolPad
false
5,199
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
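Because AvgPoolPad has no parameters, the eager module and the compiled AvgPoolPadNew can be compared directly. A hedged sketch, assuming both class definitions from this record are in scope and CUDA is available; the fused kernel is specialised to the (4, 4, 4, 4) trace shape.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = AvgPoolPad()(x)       # eager: ZeroPad2d -> AvgPool2d(count_include_pad=False) -> crop
out = AvgPoolPadNew()(x)    # fused Triton kernel plus a crop copy
print(out.shape)                             # expected (4, 4, 2, 2)
print(torch.allclose(ref, out, atol=1e-6))   # expected True if the fusion is faithful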
EmbeddingModel
import torch import torch.nn.functional as F from torch import nn from torch import optim class EmbeddingModel(nn.Module): def __init__(self, obs_size, num_outputs): super(EmbeddingModel, self).__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32 * 2, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def forward(self, x1, x2): x1 = self.embedding(x1) x2 = self.embedding(x2) x = torch.cat([x1, x2], dim=2) x = self.last(x) return nn.Softmax(dim=2)(x) def embedding(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return x def train_model(self, batch): batch_size = torch.stack(batch.state).size()[0] states = torch.stack(batch.state).view(batch_size, config. sequence_length, self.obs_size)[:, -5:, :] next_states = torch.stack(batch.next_state).view(batch_size, config .sequence_length, self.obs_size)[:, -5:, :] actions = torch.stack(batch.action).view(batch_size, config. sequence_length, -1).long()[:, -5:, :] self.optimizer.zero_grad() net_out = self.forward(states, next_states) actions_one_hot = torch.squeeze(F.one_hot(actions, self.num_outputs) ).float() loss = nn.MSELoss()(net_out, actions_one_hot) loss.backward() self.optimizer.step() return loss.item() def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'obs_size': 4, 'num_outputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(in_out_ptr1 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp15 = tl.load(in_ptr2 + (32 * x1 + (-32 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (-32 + x0), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 64), (64, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) buf3 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf3) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 32), (128, 32, 1), 0) del buf0 buf13 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) buf4 = reinterpret_tensor(buf3, (4, 4, 32), (128, 32, 1), 0) del buf3 buf11 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, buf4, primals_2, buf13, buf11, 512, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_2 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf5 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(1024)](buf2, primals_5, buf5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf6, (16, 64), (64, 1), 0), reinterpret_tensor(primals_7, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_8 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf7, buf8, 64, XBLOCK=64, 
num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0) del buf7 triton_poi_fused__softmax_3[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 buf10 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) buf12 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(512)](buf5, primals_5, buf2, buf10, buf12, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf5 del primals_5 return buf9, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor( primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 32), (32, 1), 0), reinterpret_tensor(buf6, (16, 64), (64, 1), 0 ), buf9, primals_7, buf10, primals_4, buf11, buf12, buf13 class EmbeddingModelNew(nn.Module): def __init__(self, obs_size, num_outputs): super(EmbeddingModelNew, self).__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32 * 2, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def embedding(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return x def train_model(self, batch): batch_size = torch.stack(batch.state).size()[0] states = torch.stack(batch.state).view(batch_size, config. sequence_length, self.obs_size)[:, -5:, :] next_states = torch.stack(batch.next_state).view(batch_size, config .sequence_length, self.obs_size)[:, -5:, :] actions = torch.stack(batch.action).view(batch_size, config. sequence_length, -1).long()[:, -5:, :] self.optimizer.zero_grad() net_out = self.forward(states, next_states) actions_one_hot = torch.squeeze(F.one_hot(actions, self.num_outputs) ).float() loss = nn.MSELoss()(net_out, actions_one_hot) loss.backward() self.optimizer.step() return loss.item() def forward(self, input_0, input_1): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_7 = self.last.weight primals_8 = self.last.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Deepest-Project/agent57_from_ngu
EmbeddingModel
false
5,200
[ "MIT" ]
1
2f596024c7538cfaa5cf63cde1b77f8a1c22d208
https://github.com/Deepest-Project/agent57_from_ngu/tree/2f596024c7538cfaa5cf63cde1b77f8a1c22d208
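A hedged smoke test for the EmbeddingModel record, assuming the optimised code above has been run and CUDA is available. Only the compiled forward() is exercised; embedding() and train_model() are kept verbatim from the source (train_model still references a module-level config object that is not defined in this snippet).

import torch

model = EmbeddingModelNew(obs_size=4, num_outputs=4).cuda()
states = torch.rand(4, 4, 4, device='cuda')
next_states = torch.rand(4, 4, 4, device='cuda')
probs = model(states, next_states)
print(probs.shape)        # expected (4, 4, 4)
print(probs.sum(dim=2))   # softmax over the last dimension, each row sums to ~1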
_ScaledDotProductAttention
import torch import torch.nn as nn class _ScaledDotProductAttention(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super().__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.permute(0, 2, 1)) if self.scale: dimension = torch.sqrt(torch.tensor(k.shape[-1])) attn = attn / dimension if mask is not None: attn = attn.masked_fill(mask, -1000000000.0) attn = self.softmax(attn) if self.dropout is not None: attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class _ScaledDotProductAttentionNew(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super().__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return 
output[0], output[1]
Gian-Wiher/darts
_ScaledDotProductAttention
false
5,201
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
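A shape-level smoke test for the compiled attention, given as a hedged sketch: it assumes the record's optimised code is in scope and a CUDA device is available, and it does not assert numerical equivalence with the eager module (the argument ordering inside the traced call() is not verified here).

import torch

attn_module = _ScaledDotProductAttentionNew(dropout=None, scale=True)
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')
output, attn = attn_module(q, k, v)
print(output.shape, attn.shape)   # expected (4, 4, 4) and (4, 4, 4)
print(attn.sum(dim=2))            # attention weights are softmax-normalised over the last dim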
Fire
import torch import torch.nn as nn from torchvision.transforms import * class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x))], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4, 'expand3x3_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp12 & xmask, other=0.0) tmp16 = tl.load(in_ptr3 + (-4 + x1), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 
256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf3, primals_7, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf2, primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del primals_5 return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6 class FireNew(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(FireNew, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.squeeze.weight primals_2 = self.squeeze.bias primals_4 = self.expand1x1.weight primals_5 = self.expand1x1.bias primals_6 = self.expand3x3.weight primals_7 = self.expand3x3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
DRACOyu/deep-person-reid
Fire
false
5,202
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
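A hedged usage sketch for the Fire record, assuming the code above is in scope and CUDA is available: the 1x1 and 3x3 expand branches are concatenated on the channel axis, so the traced configuration produces 8 output channels.

import torch

fire = FireNew(inplanes=4, squeeze_planes=4, expand1x1_planes=4, expand3x3_planes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = fire(x)
print(out.shape)   # expected (4, 8, 4, 4)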
ToRGB
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. 
in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, 
down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( 
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
CTPLab/IID_representation_learning
ToRGB
false
5,203
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
MaxPoolPad
import torch import torch.nn as nn from torchvision.transforms import * class MaxPoolPad(nn.Module): def __init__(self): super(MaxPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, 
tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + x4, tmp91, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)]( arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class MaxPoolPadNew(nn.Module): def __init__(self): super(MaxPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
DRACOyu/deep-person-reid
MaxPoolPad
false
5,204
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
_GatedLinearUnit
import torch import torch.nn as nn import torch.nn.functional as F class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_glu_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) class _GatedLinearUnitNew(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Gian-Wiher/darts
_GatedLinearUnit
false
5,205
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
_AddNorm
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr2 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp18 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + 2) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr2 + 3) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp1 * tmp4 tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tmp0 + tmp7 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp10 * tmp13 tmp15 = tmp14 * tmp6 tmp16 = tmp9 + tmp15 tmp17 = tmp8 + tmp16 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp19 * tmp22 tmp24 = tmp23 * tmp6 tmp25 = tmp18 + tmp24 tmp26 = tmp17 + tmp25 tmp31 = tl.sigmoid(tmp30) tmp32 = tmp28 * tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp27 + tmp33 tmp35 = tmp26 + tmp34 tmp36 = 4.0 tmp37 = tmp35 / tmp36 tmp38 = tmp8 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp16 - tmp37 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp25 - tmp37 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp34 - tmp37 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp48 / tmp36 tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp49, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp9 = tmp7 - tmp8 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp9 * tmp13 tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) 
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0[grid(64)]( primals_3, primals_2, primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(256)]( primals_3, primals_2, primals_1, buf0, buf1, primals_4, primals_5, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_5 return buf2, primals_1, primals_2, primals_3, primals_4 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNormNew(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, input_0, input_1): primals_1 = self.mask primals_4 = self.norm.weight primals_5 = self.norm.bias primals_2 = input_1 primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Gian-Wiher/darts
_AddNorm
false
5,206
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
EqualLinear
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim, bias=True, 
bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CTPLab/IID_representation_learning
EqualLinear
false
5,207
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
_ResampleNorm
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp21 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp10 = tl.sigmoid(tmp9) tmp11 = tmp7 * tmp10 tmp12 = tmp11 * tmp5 tmp13 = tmp6 + tmp12 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp14 * tmp17 tmp19 = tmp18 * tmp5 tmp20 = tmp13 + tmp19 tmp24 = tl.sigmoid(tmp23) tmp25 = tmp21 * tmp24 tmp26 = tmp25 * tmp5 tmp27 = tmp20 + tmp26 tmp28 = 4.0 tmp29 = tmp27 / tmp28 tmp30 = tmp6 - tmp29 tmp31 = tmp30 * tmp30 tmp32 = tmp12 - tmp29 tmp33 = tmp32 * tmp32 tmp34 = tmp31 + tmp33 tmp35 = tmp19 - tmp29 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp38 = tmp26 - tmp29 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp40 / tmp28 tl.store(out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr1 + x0, tmp41, xmask) @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_mul_native_layer_norm_sigmoid_0[grid(64)](primals_2, primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_native_layer_norm_sigmoid_1[grid(256)](primals_2, primals_1, buf0, buf1, primals_3, primals_4, buf2, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_4 return buf2, primals_1, primals_2, primals_3 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _ResampleNormNew(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, input_0): primals_1 = self.mask primals_3 = self.norm.weight primals_4 = self.norm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Gian-Wiher/darts
_ResampleNorm
false
5,208
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
TransformerDecoderLayer
import math import torch import torch.nn.functional as F from torch import nn def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class TransformerDecoderLayer(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.dropout = nn.Dropout(p=dropout) self.self_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.encoder_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm3 = nn.LayerNorm(embedding_size) def forward(self, x, encoder_output, encoder_mask): decoder_mask = self._create_selfattn_mask(x) residual = x x = self.self_attention(query=x, mask=decoder_mask) x = self.dropout(x) x = x + residual x = _normalize(x, self.norm1) residual = x x = self.encoder_attention(query=x, key=encoder_output, value= encoder_output, 
mask=encoder_mask) x = self.dropout(x) x = residual + x x = _normalize(x, self.norm2) residual = x x = self.ffn(x) x = self.dropout(x) x = residual + x x = _normalize(x, self.norm3) return x def _create_selfattn_mask(self, x): bsz = x.size(0) time = x.size(1) mask = torch.tril(x.new(time, time).fill_(1)) mask = mask.unsqueeze(0).expand(bsz, -1, -1) return mask def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex tmp0 = x0 + -1 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp4 tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, 
XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4, 4), (4, 1)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (4, 4), (4, 1)) 
assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_7, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_5, buf5, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(256)](buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_9 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](buf13, primals_1, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](buf13, primals_1, buf14, buf15, primals_10, primals_11, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf17, primals_15, buf18, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_15 buf19 = buf17 del buf17 
extern_kernels.mm(reinterpret_tensor(primals_13, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf19) del primals_16 buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_13, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf20) del primals_18 buf21 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf20, primals_19, buf21, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_19 buf22 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf20 triton_poi_fused_clone_0[grid(16, 4)](buf19, primals_17, buf22, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_17 buf23 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 0, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_7[grid(64)](primals_12, buf24, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_12 buf25 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 64), 0) del buf19 buf26 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_8[grid(64)](buf24, buf23, buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = buf23 del buf23 triton_poi_fused__softmax_masked_fill_9[grid(256)](buf27, buf24, buf25, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 1), 0) del buf26 extern_kernels.bmm(buf27, reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf25 triton_poi_fused_clone_4[grid(16, 4)](buf28, buf29, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0) del buf28 extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0) del buf30 triton_poi_fused_add_10[grid(64)](buf31, buf16, primals_21, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_21 buf32 = buf15 del buf15 buf33 = buf14 del buf14 triton_poi_fused_native_layer_norm_11[grid(16)](buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf31, buf32, buf33, primals_22, primals_23, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_23 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf34, reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0) del buf35 buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_13[grid(64)](buf36, primals_25, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_25 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0) del buf37 triton_poi_fused_add_10[grid(64)](buf38, buf34, primals_27, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_27 buf39 = buf33 del buf33 buf40 = buf32 del buf32 triton_poi_fused_native_layer_norm_11[grid(16)](buf38, buf39, 
buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf38, buf39, buf40, primals_28, primals_29, buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf39 del buf40 del primals_29 return (reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_10, primals_22, primals_28, buf7, buf10, reinterpret_tensor (buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor( primals_13, (16, 4), (4, 1), 0), buf24, buf27, reinterpret_tensor( buf29, (16, 4), (4, 1), 0), reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf34, reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(buf38, (16, 4), (4, 1), 0), primals_26, buf42, primals_24, primals_20, reinterpret_tensor(buf21, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf18, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0), primals_14, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = 
F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class TransformerDecoderLayerNew(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.dropout = nn.Dropout(p=dropout) self.self_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.encoder_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm3 = nn.LayerNorm(embedding_size) def _create_selfattn_mask(self, x): bsz = x.size(0) time = x.size(1) mask = torch.tril(x.new(time, time).fill_(1)) mask = mask.unsqueeze(0).expand(bsz, -1, -1) return mask def forward(self, input_0, input_1, input_2): primals_2 = self.self_attention.q_lin.weight primals_3 = self.self_attention.q_lin.bias primals_4 = self.self_attention.k_lin.weight primals_5 = self.self_attention.k_lin.bias primals_6 = self.self_attention.v_lin.weight primals_7 = self.self_attention.v_lin.bias primals_8 = self.self_attention.out_lin.weight primals_9 = self.self_attention.out_lin.bias primals_10 = self.norm1.weight primals_11 = self.norm1.bias primals_12 = self.encoder_attention.q_lin.weight primals_15 = self.encoder_attention.q_lin.bias primals_14 = self.encoder_attention.k_lin.weight primals_17 = self.encoder_attention.k_lin.bias primals_16 = self.encoder_attention.v_lin.weight primals_19 = self.encoder_attention.v_lin.bias primals_18 = self.encoder_attention.out_lin.weight primals_21 = self.encoder_attention.out_lin.bias primals_22 = self.norm2.weight primals_23 = self.norm2.bias primals_20 = self.ffn.lin1.weight primals_25 = self.ffn.lin1.bias primals_24 = self.ffn.lin2.weight primals_27 = self.ffn.lin2.bias primals_28 = self.norm3.weight primals_29 = self.norm3.bias primals_1 = input_0 primals_13 = input_1 primals_26 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return output[0]
FrankVerhoef/Persona-Dialogue-Generation
TransformerDecoderLayer
false
5,209
[ "MIT" ]
1
ffd8413c2e8b6446097902dd1c496aeb24b852b4
https://github.com/FrankVerhoef/Persona-Dialogue-Generation/tree/ffd8413c2e8b6446097902dd1c496aeb24b852b4
FeedForward
import torch
import torch.nn.functional as F
from torch import nn


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout1(F.gelu(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4, 'expansion_factor': 4, 'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 16), (16, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3,
            (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16),
            (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
            (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0,
        reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4)


class FeedForwardNew(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
GimmeSpoon/mlp-singer
FeedForward
false
5,210
[ "MIT" ]
1
36d10a23c46fa7400994ccd063de79ff089efd5e
https://github.com/GimmeSpoon/mlp-singer/tree/36d10a23c46fa7400994ccd063de79ff089efd5e
ChannelMixer
import torch
import torch.nn.functional as F
from torch import nn


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout1(F.gelu(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        return x


class ChannelMixer(nn.Module):

    def __init__(self, d_model, expansion_factor, dropout):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.mlp = FeedForward(d_model, expansion_factor, dropout)

    def forward(self, x):
        residual = x
        x = self.norm(x)
        x = self.mlp(x)
        out = x + residual
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'expansion_factor': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) 
assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_gelu_2[grid(1024)](buf3, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0 ), primals_6, primals_4 class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class ChannelMixerNew(nn.Module): def __init__(self, d_model, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(d_model, expansion_factor, dropout) def forward(self, input_0): primals_2 = self.norm.weight primals_3 = self.norm.bias primals_4 = self.mlp.fc1.weight primals_5 = self.mlp.fc1.bias primals_6 = self.mlp.fc2.weight primals_7 = self.mlp.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
GimmeSpoon/mlp-singer
ChannelMixer
false
5,211
[ "MIT" ]
1
36d10a23c46fa7400994ccd063de79ff089efd5e
https://github.com/GimmeSpoon/mlp-singer/tree/36d10a23c46fa7400994ccd063de79ff089efd5e
GCN
from torch.nn import Module
import math
import torch
from math import *
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCN(nn.Module):

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        return torch.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import math
from math import *
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf3 = buf0
        del buf0
        extern_kernels.mm(buf2, primals_5, out=buf3)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf3, out=buf4)
        del buf3
        buf5 = buf4
        del buf4
        triton_poi_fused_add_sigmoid_1[grid(16)](buf5, primals_6, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_6
    return (buf5, buf2, buf5,
        reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_2, (4, 4), (1, 4), 0))


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCNNew(nn.Module):

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCNNew, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, input_0, input_1):
        primals_1 = self.gc1.weight
        primals_4 = self.gc1.bias
        primals_2 = self.gc2.weight
        primals_6 = self.gc2.bias
        primals_3 = input_0
        primals_5 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
GeekV5/PaperReProduction20200425
GCN
false
5,212
[ "Apache-2.0" ]
1
5c44da3c2fac89dd316a5e4930a78d023a12176d
https://github.com/GeekV5/PaperReProduction20200425/tree/5c44da3c2fac89dd316a5e4930a78d023a12176d
ModulatedConv2d
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. 
in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), 
reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = 
pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_3 = self.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, 
primals_2, primals_3, primals_4, primals_5]) return output[0]
CTPLab/IID_representation_learning
ModulatedConv2d
false
5,213
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
C3D
import torch
import torch.nn as nn


class C3D(nn.Module):

    def __init__(self, num_classes):
        super(C3D, self).__init__()
        self.conv1a = nn.Conv3d(in_channels=3, out_channels=64,
            kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
        self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.conv2a = nn.Conv3d(64, 128, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3),
            padding=(1, 1, 1))
        self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
            padding=(0, 1, 1))
        self.fc6 = nn.Linear(8192, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8 = nn.Linear(4096, num_classes)

    def forward(self, x):
        x = self.pool1(self.conv1a(x))
        x = self.pool2(self.conv2a(x))
        x = self.pool3(self.conv3b(self.conv3a(x)))
        x = self.pool4(self.conv4b(self.conv4a(x)))
        x = self.pool5(self.conv5b(self.conv5a(x)))
        x = x.view(-1, 8192)
        fc6_features = self.fc6(x)
        fc7_features = self.fc7(fc6_features)
        logits = self.fc8(fc7_features)
        return logits


def get_inputs():
    return [torch.rand([4, 3, 64, 64, 64])]


def get_init_inputs():
    return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 8192 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 128 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 
1)) assert_size_stride(primals_13, (512,), (1,)) assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (4096, 8192), (8192, 1)) assert_size_stride(primals_19, (4096,), (1,)) assert_size_stride(primals_20, (4096, 4096), (4096, 1)) assert_size_stride(primals_21, (4096,), (1,)) assert_size_stride(primals_22, (4, 4096), (4096, 1)) assert_size_stride(primals_23, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(67108864)](buf1, primals_2, 67108864, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2, 2], [1, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536, 1024, 32, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_1[grid(33554432)](buf6, primals_5, 33554432, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2, 2], [2, 2, 2]) buf8 = buf7[0] buf9 = buf7[1] del buf7 buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1 ), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 32, 16, 16), (2097152, 8192, 256, 16, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_2[grid(8388608)](buf11, primals_7, 8388608, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 32, 16, 16), (2097152, 8192, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_2[grid(8388608)](buf13, primals_9, 8388608, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2, 2], [2, 2, 2]) buf15 = buf14[0] buf16 = buf14[1] del buf14 buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_3[grid(2097152)](buf18, primals_11, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_3[grid(2097152)](buf20, primals_13, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2, 2], [2, 2, 2]) buf22 = buf21[0] buf23 = 
buf21[1] del buf21 buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_4[grid(262144)](buf25, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_4[grid(262144)](buf27, primals_17, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2, 2], [2, 2, 2], [0, 1, 1]) buf29 = buf28[0] buf30 = buf28[1] del buf28 buf31 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32) extern_kernels.addmm(primals_19, reinterpret_tensor(buf29, (9, 8192 ), (8192, 1), 0), reinterpret_tensor(primals_18, (8192, 4096), (1, 8192), 0), alpha=1, beta=1, out=buf31) del primals_19 buf32 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32) extern_kernels.addmm(primals_21, buf31, reinterpret_tensor( primals_20, (4096, 4096), (1, 4096), 0), alpha=1, beta=1, out=buf32 ) del primals_21 buf33 = empty_strided_cuda((9, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_23, buf32, reinterpret_tensor( primals_22, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf33) del primals_23 return (buf33, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22, buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (9, 8192), ( 8192, 1), 0), buf31, buf32, primals_22, primals_20, primals_18) class C3DNew(nn.Module): def __init__(self, num_classes): super(C3DNew, self).__init__() self.conv1a = nn.Conv3d(in_channels=3, out_channels=64, kernel_size =(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)) self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)) self.conv2a = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1)) self.fc6 = nn.Linear(8192, 4096) self.fc7 = nn.Linear(4096, 4096) self.fc8 = nn.Linear(4096, num_classes) def forward(self, input_0): primals_1 = self.conv1a.weight primals_2 = self.conv1a.bias primals_4 = self.conv2a.weight primals_5 = self.conv2a.bias primals_6 = self.conv3a.weight primals_7 = self.conv3a.bias primals_8 = self.conv3b.weight primals_9 = self.conv3b.bias primals_10 = self.conv4a.weight primals_11 = self.conv4a.bias primals_12 = self.conv4b.weight primals_13 = self.conv4b.bias primals_14 = 
self.conv5a.weight primals_15 = self.conv5a.bias primals_16 = self.conv5b.weight primals_17 = self.conv5b.bias primals_18 = self.fc6.weight primals_19 = self.fc6.bias primals_20 = self.fc7.weight primals_21 = self.fc7.bias primals_22 = self.fc8.weight primals_23 = self.fc8.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0]
DuyHung21/actionrecognition
C3D
false
5,214
[ "MIT" ]
1
a095b2e16db249bff97b1eebdab1e90468224fcb
https://github.com/DuyHung21/actionrecognition/tree/a095b2e16db249bff97b1eebdab1e90468224fcb
_GateAddNorm
import torch
import torch.nn as nn
import torch.nn.functional as F


class _TimeDistributedInterpolation(nn.Module):

    def __init__(self, output_size: 'int', batch_first: 'bool'=False,
        trainable: 'bool'=False):
        super().__init__()
        self.output_size = output_size
        self.batch_first = batch_first
        self.trainable = trainable
        if self.trainable:
            self.mask = nn.Parameter(torch.zeros(self.output_size,
                dtype=torch.float32))
            self.gate = nn.Sigmoid()

    def interpolate(self, x):
        upsampled = F.interpolate(x.unsqueeze(1), self.output_size,
            mode='linear', align_corners=True).squeeze(1)
        if self.trainable:
            upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
        return upsampled

    def forward(self, x):
        if len(x.size()) <= 2:
            return self.interpolate(x)
        x_reshape = x.contiguous().view(-1, x.size(-1))
        y = self.interpolate(x_reshape)
        if self.batch_first:
            y = y.contiguous().view(x.size(0), -1, y.size(-1))
        else:
            y = y.view(-1, x.size(1), y.size(-1))
        return y


class _GatedLinearUnit(nn.Module):
    """Gated Linear Unit"""

    def __init__(self, input_size: 'int', hidden_size: 'int'=None,
        dropout: 'float'=None):
        super().__init__()
        if dropout is not None:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = dropout
        self.hidden_size = hidden_size or input_size
        self.fc = nn.Linear(input_size, self.hidden_size * 2)
        self.init_weights()

    def init_weights(self):
        for n, p in self.named_parameters():
            if 'bias' in n:
                torch.nn.init.zeros_(p)
            elif 'fc' in n:
                torch.nn.init.xavier_uniform_(p)

    def forward(self, x):
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.fc(x)
        x = F.glu(x, dim=-1)
        return x


class _AddNorm(nn.Module):

    def __init__(self, input_size: 'int', skip_size: 'int'=None,
        trainable_add: 'bool'=True):
        super().__init__()
        self.input_size = input_size
        self.trainable_add = trainable_add
        self.skip_size = skip_size or input_size
        if self.input_size != self.skip_size:
            self.resample = _TimeDistributedInterpolation(self.input_size,
                batch_first=True, trainable=False)
        if self.trainable_add:
            self.mask = nn.Parameter(torch.zeros(self.input_size,
                dtype=torch.float))
            self.gate = nn.Sigmoid()
        self.norm = nn.LayerNorm(self.input_size)

    def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'):
        if self.input_size != self.skip_size:
            skip = self.resample(skip)
        if self.trainable_add:
            skip = skip * self.gate(self.mask) * 2.0
        output = self.norm(x + skip)
        return output


class _GateAddNorm(nn.Module):

    def __init__(self, input_size: 'int', hidden_size: 'int'=None,
        skip_size: 'int'=None, trainable_add: 'bool'=False,
        dropout: 'float'=None):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size or input_size
        self.skip_size = skip_size or self.hidden_size
        self.dropout = dropout
        self.glu = _GatedLinearUnit(self.input_size,
            hidden_size=self.hidden_size, dropout=self.dropout)
        self.add_norm = _AddNorm(self.hidden_size,
            skip_size=self.skip_size, trainable_add=trainable_add)

    def forward(self, x, skip):
        output = self.glu(x)
        output = self.add_norm(output, skip)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp4 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
get_raw_stream(0) triton_poi_fused_add_glu_0[grid(256)](buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf1 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNormNew(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. 
hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, input_0, input_1): primals_1 = self.glu.fc.weight primals_2 = self.glu.fc.bias primals_5 = self.add_norm.norm.weight primals_6 = self.add_norm.norm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Gian-Wiher/darts
_GateAddNorm
false
5,215
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
InnerProductDecoder
import torch import torch.utils.data class InnerProductDecoder(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward(self, z, edge_index, sigmoid=True): """Decodes the latent variables :obj:`z` into edge probabilties for the given node-pairs :obj:`edge_index`. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1) return torch.sigmoid(value) if sigmoid else value def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype =torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp7 = tl.load(in_ptr0 + (4 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + 4 * tmp4, xmask, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask, 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr1 + 4 * tmp10, xmask, eviction_policy='evict_last') tmp13 = tmp6 * tmp12 tmp14 = tl.load(in_ptr1 + (1 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tl.load(in_ptr1 + (2 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (2 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl.load(in_ptr1 + (3 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (3 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25.to(tl.float32) tmp27 = tl.sigmoid(tmp26) tl.store(out_ptr1 + x0, tmp27, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sigmoid_sum_0[grid(4)](arg0_1, arg1_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class InnerProductDecoderNew(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
GrumpyZhou/pytorch_geometric
InnerProductDecoder
false
5,216
[ "MIT" ]
1
88c54e72d3e26ad48e9ccd99e5696c7f19269d94
https://github.com/GrumpyZhou/pytorch_geometric/tree/88c54e72d3e26ad48e9ccd99e5696c7f19269d94
TokenMixer
import torch import torch.nn.functional as F from torch import nn class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class TokenMixer(nn.Module): def __init__(self, d_model, seq_len, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(seq_len, expansion_factor, dropout) def forward(self, x): residual = x x = self.norm(x) x = x.transpose(1, 2) x = self.mlp(x) x = x.transpose(1, 2) out = x + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'seq_len': 4, 'expansion_factor': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x5 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask) @triton.jit def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_add_gelu_2[grid(1024)](buf3, primals_5, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 16, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0 ), primals_6, primals_4 class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class TokenMixerNew(nn.Module): def __init__(self, d_model, seq_len, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(seq_len, expansion_factor, dropout) def forward(self, input_0): primals_2 = self.norm.weight primals_3 = self.norm.bias primals_4 = self.mlp.fc1.weight primals_5 = self.mlp.fc1.bias primals_6 = self.mlp.fc2.weight primals_7 = self.mlp.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
GimmeSpoon/mlp-singer
TokenMixer
false
5,218
[ "MIT" ]
1
36d10a23c46fa7400994ccd063de79ff089efd5e
https://github.com/GimmeSpoon/mlp-singer/tree/36d10a23c46fa7400994ccd063de79ff089efd5e
GrayLoss
import torch import torch.nn as nn class GrayLoss(nn.Module): def __init__(self): super(GrayLoss, self).__init__() self.l1 = nn.L1Loss() def forward(self, x): y = torch.ones_like(x) / 2.0 return 1 / self.l1(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_div_mean_mul_reciprocal_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.5 tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = tl.full([1], 1, tl.int32) tmp10 = tmp9 / tmp8 tmp11 = 1.0 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_div_mean_mul_reciprocal_sub_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class GrayLossNew(nn.Module): def __init__(self): super(GrayLossNew, self).__init__() self.l1 = nn.L1Loss() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
GuYuanjie/DeepFusionPrior
GrayLoss
false
5,219
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
GenNoise
import torch import torch.nn as nn class GenNoise(nn.Module): def __init__(self, dim2): super(GenNoise, self).__init__() self.dim2 = dim2 def forward(self, x): a = list(x.size()) a[1] = self.dim2 b = torch.zeros(a).type_as(x.data) b.normal_() x = torch.autograd.Variable(b) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim2': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(256)](buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = torch.ops.aten.normal_functional.default(buf0) del buf0 buf2 = buf1 del buf1 return buf2, class GenNoiseNew(nn.Module): def __init__(self, dim2): super(GenNoiseNew, self).__init__() self.dim2 = dim2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
GuYuanjie/DeepFusionPrior
GenNoise
false
5,220
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
NonBlurryLoss
import torch import torch.nn as nn class NonBlurryLoss(nn.Module): def __init__(self): """ Loss on the distance to 0.5 """ super(NonBlurryLoss, self).__init__() self.mse = nn.MSELoss() def forward(self, x): return 1 - self.mse(x, torch.ones_like(x) * 0.5) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_mul_rsub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.5 tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp9 - tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_mul_rsub_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class NonBlurryLossNew(nn.Module): def __init__(self): """ Loss on the distance to 0.5 """ super(NonBlurryLossNew, self).__init__() self.mse = nn.MSELoss() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
GuYuanjie/DeepFusionPrior
NonBlurryLoss
false
5,221
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
_GatedResidualNetwork
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. 
hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output class _GatedResidualNetwork(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int', output_size: 'int', dropout: 'float'=0.1, context_size: 'int'=None, residual: 'bool'=False): super().__init__() self.input_size = input_size self.output_size = output_size self.context_size = context_size self.hidden_size = hidden_size self.dropout = dropout self.residual = residual if self.input_size != self.output_size and not self.residual: residual_size = self.input_size else: residual_size = self.output_size if self.output_size != residual_size: self.resample_norm = _ResampleNorm(residual_size, self.output_size) self.fc1 = nn.Linear(self.input_size, self.hidden_size) self.elu = nn.ELU() if self.context_size is not None: self.context = nn.Linear(self.context_size, self.hidden_size, bias=False) self.fc2 = nn.Linear(self.hidden_size, self.hidden_size) self.init_weights() self.gate_norm = _GateAddNorm(input_size=self.hidden_size, skip_size=self.output_size, hidden_size=self.output_size, dropout=self.dropout, trainable_add=False) def init_weights(self): for name, p in self.named_parameters(): if 'bias' in name: torch.nn.init.zeros_(p) elif 'fc1' in name or 'fc2' in name: torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu') elif 'context' in name: torch.nn.init.xavier_uniform_(p) def forward(self, x, context=None, residual=None): if residual is None: residual = x if self.input_size != self.output_size and not self.residual: residual = self.resample_norm(residual) x = self.fc1(x) if context is not None: context = self.context(context) x = x + context x = self.elu(x) x = self.fc2(x) x = self.gate_norm(x, residual) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 
* tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_glu_1[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(64)](buf4, primals_1, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(256)](buf4, primals_1, buf5, buf6, primals_8, primals_9, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 del primals_9 return buf7, primals_1, primals_8, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf4, primals_6, primals_4 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size 
self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. 
hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output class _GatedResidualNetworkNew(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int', output_size: 'int', dropout: 'float'=0.1, context_size: 'int'=None, residual: 'bool'=False): super().__init__() self.input_size = input_size self.output_size = output_size self.context_size = context_size self.hidden_size = hidden_size self.dropout = dropout self.residual = residual if self.input_size != self.output_size and not self.residual: residual_size = self.input_size else: residual_size = self.output_size if self.output_size != residual_size: self.resample_norm = _ResampleNorm(residual_size, self.output_size) self.fc1 = nn.Linear(self.input_size, self.hidden_size) self.elu = nn.ELU() if self.context_size is not None: self.context = nn.Linear(self.context_size, self.hidden_size, bias=False) self.fc2 = nn.Linear(self.hidden_size, self.hidden_size) self.init_weights() self.gate_norm = _GateAddNorm(input_size=self.hidden_size, skip_size=self.output_size, hidden_size=self.output_size, dropout=self.dropout, trainable_add=False) def init_weights(self): for name, p in self.named_parameters(): if 'bias' in name: torch.nn.init.zeros_(p) elif 'fc1' in name or 'fc2' in name: torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu') elif 'context' in name: torch.nn.init.xavier_uniform_(p) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.gate_norm.glu.fc.weight primals_7 = self.gate_norm.glu.fc.bias primals_8 = self.gate_norm.add_norm.norm.weight primals_9 = self.gate_norm.add_norm.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Gian-Wiher/darts
_GatedResidualNetwork
false
5,222
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
TabularNetD
import torch import numpy as np import matplotlib.pyplot as plt import torch.nn as nn import torch.optim as optim class GaussianNoise(nn.Module): """Gaussian noise regularizer""" def __init__(self, device, sigma=0.1): super().__init__() self.device = device self.sigma = sigma def forward(self, x): if self.training and self.sigma != 0.0: sampled_noise = torch.randn(*x.size(), device=self.device ) * self.sigma x = x + sampled_noise return x class NetUtils: """Contains utils to be inherited by other nets in this project""" def __init__(self): self.epoch = 0 self.streaming_weight_history = {} self.streaming_gradient_history = {} self.histogram_weight_history = {} self.histogram_gradient_history = {} self.gnorm_history = {} self.gnorm_total_history = [] self.wnorm_history = {} self.wnorm_total_history = [] self.layer_list = [] self.layer_list_names = [] self.loss = [] self.losses = [] self.norm_num = 2 self.bins = 20 def init_layer_list(self): """Initializes list of layers for tracking history""" nn_module_ignore_list = {'batchnorm', 'activation', 'loss', 'Noise', 'CustomCatGANLayer'} self.layer_list = [x for x in self._modules.values() if not any( excl in str(type(x)) for excl in nn_module_ignore_list)] self.layer_list_names = [x for x in self._modules.keys() if not any (excl in str(type(self._modules[x])) for excl in nn_module_ignore_list)] def init_history(self): """Initializes objects for storing history based on layer_list""" for layer in self.layer_list: self.streaming_weight_history[layer] = {'weight': [], 'bias': []} self.streaming_gradient_history[layer] = {'weight': [], 'bias': []} self.histogram_weight_history[layer] = {'weight': [], 'bias': []} self.histogram_gradient_history[layer] = {'weight': [], 'bias': []} self.wnorm_history[layer] = {'weight': [], 'bias': []} self.gnorm_history[layer] = {'weight': [], 'bias': []} def next_epoch(self): """Resets internal storage of training history to stream next epoch""" self.epoch += 1 self.losses.append(np.mean(self.loss)) self.loss = [] self.update_wnormz() self.update_gnormz() self.update_hist_list() for layer in self.layer_list: self.streaming_weight_history[layer] = {'weight': [], 'bias': []} self.streaming_gradient_history[layer] = {'weight': [], 'bias': []} def store_weight_and_grad_norms(self): """ Appends training history for summarization and visualization later. Scales each norm by the number of elements. Should be ran once per step per subnet. """ for layer in self.layer_list: self.streaming_weight_history[layer]['weight'].append(layer. weight.norm(self.norm_num).detach().cpu().numpy().take(0) / layer.weight.numel()) self.streaming_weight_history[layer]['bias'].append(layer.bias. norm(self.norm_num).detach().cpu().numpy().take(0) / layer. bias.numel()) self.streaming_gradient_history[layer]['weight'].append(layer. weight.grad.norm(self.norm_num).detach().cpu().numpy().take (0) / layer.weight.grad.numel()) self.streaming_gradient_history[layer]['bias'].append(layer. bias.grad.norm(self.norm_num).detach().cpu().numpy().take(0 ) / layer.bias.grad.numel()) def update_hist_list(self): """ Updates the histogram history based on the weights at the end of an epoch. Should be ran once per epoch per subnet. """ for layer in self.layer_list: self.histogram_weight_history[layer]['weight'].append(np. histogram(layer.weight.detach().cpu().numpy().reshape(-1), bins=self.bins)) self.histogram_weight_history[layer]['bias'].append(np. 
histogram(layer.bias.detach().cpu().numpy().reshape(-1), bins=self.bins)) if self.epoch == 0: self.histogram_gradient_history[layer]['weight'].append(None) self.histogram_gradient_history[layer]['bias'].append(None) else: self.histogram_gradient_history[layer]['weight'].append(np. histogram(layer.weight.grad.detach().cpu().numpy(). reshape(-1), bins=self.bins)) self.histogram_gradient_history[layer]['bias'].append(np. histogram(layer.bias.grad.detach().cpu().numpy(). reshape(-1), bins=self.bins)) def update_wnormz(self): """ Tracks history of desired norm of weights. Should be ran once per epoch per subnet. :param norm_num: 1 = l1 norm, 2 = l2 norm :return: list of norms of weights by layer, as well as overall weight norm """ total_norm = 0 for layer in self.wnorm_history: w_norm = np.linalg.norm(self.streaming_weight_history[layer][ 'weight'], self.norm_num) b_norm = np.linalg.norm(self.streaming_weight_history[layer][ 'bias'], self.norm_num) self.wnorm_history[layer]['weight'].append(w_norm) self.wnorm_history[layer]['bias'].append(b_norm) if self.norm_num == 1: total_norm += abs(w_norm) + abs(b_norm) else: total_norm += w_norm ** self.norm_num + b_norm ** self.norm_num total_norm = total_norm ** (1.0 / self.norm_num) self.wnorm_total_history.append(total_norm) def update_gnormz(self): """ Calculates gradient norms by layer as well as overall. Scales each norm by the number of elements. Should be ran once per epoch per subnet. :param norm_num: 1 = l1 norm, 2 = l2 norm :return: list of gradient norms by layer, as well as overall gradient norm """ total_norm = 0 for layer in self.gnorm_history: w_norm = np.linalg.norm(self.streaming_gradient_history[layer][ 'weight'], self.norm_num) / len(self. streaming_gradient_history[layer]['weight']) b_norm = np.linalg.norm(self.streaming_gradient_history[layer][ 'bias'], self.norm_num) / len(self. streaming_gradient_history[layer]['bias']) self.gnorm_history[layer]['weight'].append(w_norm) self.gnorm_history[layer]['bias'].append(b_norm) if self.norm_num == 1: total_norm += abs(w_norm) + abs(b_norm) else: total_norm += w_norm ** self.norm_num + b_norm ** self.norm_num total_norm = total_norm ** (1.0 / self.norm_num) / len(self. gnorm_history) self.gnorm_total_history.append(total_norm) def weights_init(self): """ Custom weights initialization for subnets Should only be run when first creating net. Will reset effects of training if run after training. 
""" for layer_name in self._modules: m = self._modules[layer_name] classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) nn.init.constant_(m.bias.data, 0) elif classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) nn.init.constant_(m.bias.data, 0) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) def plot_layer_scatters(self, figsize=(20, 10), show=True, save=None): """Plot weight and gradient norm history for each layer in layer_list across epochs""" assert self.epoch > 0, 'Model needs to be trained first' if save is None: save = self.path f, axes = plt.subplots(len(self.layer_list), 4, figsize=figsize, sharex=True) axes[0, 0].title.set_text('Weight Norms') axes[0, 1].title.set_text('Weight Gradient Norms') axes[0, 2].title.set_text('Bias Norms') axes[0, 3].title.set_text('Bias Gradient Norms') for i in range(4): axes[len(self.layer_list) - 1, i].set_xlabel('epochs') for i, layer in enumerate(self.layer_list): axes[i, 0].set_ylabel(self.layer_list_names[i]) axes[i, 0].plot(self.wnorm_history[layer]['weight']) axes[i, 1].plot(self.gnorm_history[layer]['weight']) axes[i, 2].plot(self.wnorm_history[layer]['bias']) axes[i, 3].plot(self.gnorm_history[layer]['bias']) sup = self.name + ' Layer Weight and Gradient Norms' st = f.suptitle(sup, fontsize='x-large') f.tight_layout() st.set_y(0.96) f.subplots_adjust(top=0.9) if show: f.show() if save: assert os.path.exists(save ), 'Check that the desired save path exists.' os.makedirs(os.path.join(save, 'layer_scatters'), exist_ok=True) f.savefig(os.path.join(save, 'layer_scatters', self.name + '_layer_scatters.png')) def plot_layer_hists(self, epoch=None, figsize=(20, 10), show=True, save=None): """Plots histograms of weight and gradients for each layer in layer_list at the desired epoch""" if epoch is None: epoch = self.epoch if save is None: save = self.path f, axes = plt.subplots(len(self.layer_list), 4, figsize=figsize, sharex=False) axes[0, 0].title.set_text('Weight Histograms') axes[0, 1].title.set_text('Weight Gradient Histograms') axes[0, 2].title.set_text('Bias Histograms') axes[0, 3].title.set_text('Bias Gradient Histograms') for i in range(4): axes[len(self.layer_list) - 1, i].set_xlabel('Value') for i, layer in enumerate(self.layer_list): axes[i, 0].set_ylabel(self.layer_list_names[i]) plt.sca(axes[i, 0]) uu.convert_np_hist_to_plot(self.histogram_weight_history[layer] ['weight'][epoch]) plt.sca(axes[i, 2]) uu.convert_np_hist_to_plot(self.histogram_weight_history[layer] ['bias'][epoch]) if epoch == 0: pass else: plt.sca(axes[i, 1]) uu.convert_np_hist_to_plot(self.histogram_gradient_history[ layer]['weight'][epoch]) plt.sca(axes[i, 3]) uu.convert_np_hist_to_plot(self.histogram_gradient_history[ layer]['bias'][epoch]) sup = (self.name + ' Layer Weight and Gradient Histograms - Epoch ' + str(epoch)) st = f.suptitle(sup, fontsize='x-large') f.tight_layout() st.set_y(0.96) f.subplots_adjust(top=0.9) if show: f.show() if save: assert os.path.exists(save ), 'Check that the desired save path exists.' os.makedirs(os.path.join(save, 'layer_histograms'), exist_ok=True) f.savefig(os.path.join(save, 'layer_histograms', self.name + '_epoch_' + str(epoch) + '_layer_histograms.png')) def build_hist_gif(self, path=None, start=0, stop=None, freq=1, fps=5, final_img_frames=20): """ Loop through self.histogram_weight_history and saves the images to a folder. :param path: Path to folder to save images. 
Folder will be created if it does not already exist. :param start: Epoch to start gif on. Default 0. :param stop: Epoch to end gif on. Default self.epoch (number of epochs trained so far). :param freq: Interval of skipping epochs. Defaults to 1 (no skipping). :param fps: Number of frames to display per second in gif. Defaults to 5. :param final_img_frames: Number of times to repeat final image of gif before it will restart. Defaults to 20 (4 seconds with 5 fps). :return: Saves a gif with the title net + _histogram_generation_animation.gif (as well as the images comprising the gif into the layer_histograms folder) """ assert len(self.histogram_weight_history[self.layer_list[0]]['weight'] ) > 1, 'Model not yet trained' if path is None: path = self.path if stop is None: stop = self.epoch ims = [] for epoch in range(start, stop + freq, freq): self.plot_layer_hists(epoch=epoch, show=False, save=path) img_name = os.path.join(path, 'layer_histograms', self.name + '_epoch_' + str(epoch) + '_layer_histograms.png') ims.append(imageio.imread(img_name)) plt.close() if epoch == stop + freq: for i in range(final_img_frames): ims.append(imageio.imread(img_name)) plt.close() imageio.mimsave(os.path.join(path, self.name + '_histogram_generation_animation.gif'), ims, fps=fps) @torch.utils.hooks.unserializable_hook def activations_hook(self, grad): """ Used for Grad CAM Hook for the gradients of the activations Used on the final convolutional layer """ self.gradients = grad def get_activations_gradient(self): """Grad CAM Helper Function""" return self.gradients def get_activations(self): """Grad CAM Helper Function""" return self.final_conv_output class TabularNetD(nn.Module, NetUtils): def __init__(self, device, H, out_dim, nc, noise, lr=0.0002, beta1=0.5, beta2=0.999, wd=0): super().__init__() NetUtils.__init__(self) self.name = 'Discriminator' self.device = device self.loss_real = None self.loss_fake = None self.noise = GaussianNoise(device=self.device, sigma=noise) self.fc1 = nn.Linear(out_dim + nc, H, bias=True) self.output = nn.Linear(H, 1, bias=True) self.act = nn.LeakyReLU(0.2) self.m = nn.Sigmoid() self.loss_fn = nn.BCELoss() self.opt = optim.Adam(self.parameters(), lr=lr, betas=(beta1, beta2 ), weight_decay=wd) self.init_layer_list() self.init_history() self.update_hist_list() self.D_x = [] self.Avg_D_reals = [] self.D_G_z1 = [] self.Avg_D_fakes = [] self.weights_init() def forward(self, row, labels): """ :param row: Row of input data to discriminate on :param labels: Label embedding :return: Binary classification (sigmoid activation on a single unit hidden layer) """ row = self.noise(row) x = torch.cat([row, labels], 1) x = self.act(self.fc1(x)) return self.m(self.output(x)) def train_one_step_real(self, output, label): self.zero_grad() self.loss_real = self.loss_fn(output, label) self.loss_real.backward() self.D_x.append(output.mean().item()) def train_one_step_fake(self, output, label): self.loss_fake = self.loss_fn(output, label) self.loss_fake.backward() self.D_G_z1.append(output.mean().item()) def combine_and_update_opt(self): self.loss.append(self.loss_real.item() + self.loss_fake.item()) self.opt.step() self.store_weight_and_grad_norms() def next_epoch_discrim(self): """Discriminator specific actions""" self.Avg_D_reals.append(np.mean(self.D_x)) self.D_x = [] self.Avg_D_fakes.append(np.mean(self.D_G_z1)) self.D_G_z1 = [] def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'device': 0, 'H': 4, 'out_dim': 4, 'nc': 4, 'noise': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import matplotlib.pyplot as plt import torch.nn as nn import torch.optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(16)](buf1, primals_4, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_6, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_6 return buf5, buf0, buf2, buf3, buf5, primals_5 class GaussianNoise(nn.Module): """Gaussian noise regularizer""" def __init__(self, device, sigma=0.1): super().__init__() self.device = device self.sigma = sigma def forward(self, x): if self.training and self.sigma != 0.0: sampled_noise = torch.randn(*x.size(), device=self.device ) * self.sigma x = x + sampled_noise return x class NetUtils: """Contains utils to be inherited by other nets in this project""" def __init__(self): self.epoch = 0 self.streaming_weight_history = {} self.streaming_gradient_history = {} self.histogram_weight_history = {} self.histogram_gradient_history = {} self.gnorm_history = {} self.gnorm_total_history = [] self.wnorm_history = {} self.wnorm_total_history = [] self.layer_list = [] self.layer_list_names = [] self.loss = [] self.losses = [] self.norm_num = 2 self.bins = 20 def init_layer_list(self): """Initializes list of layers for tracking history""" nn_module_ignore_list = {'batchnorm', 'activation', 'loss', 'Noise', 'CustomCatGANLayer'} self.layer_list = [x for x in self._modules.values() if not any( excl in str(type(x)) for excl in nn_module_ignore_list)] self.layer_list_names = [x for x in self._modules.keys() if not any (excl in str(type(self._modules[x])) for excl in nn_module_ignore_list)] def init_history(self): """Initializes objects for storing history based on layer_list""" for layer in self.layer_list: self.streaming_weight_history[layer] = {'weight': [], 'bias': []} self.streaming_gradient_history[layer] = {'weight': [], 'bias': []} self.histogram_weight_history[layer] = {'weight': [], 'bias': []} self.histogram_gradient_history[layer] = {'weight': [], 'bias': []} self.wnorm_history[layer] = {'weight': [], 'bias': []} self.gnorm_history[layer] = {'weight': [], 'bias': []} def next_epoch(self): """Resets internal storage of training history to stream next epoch""" self.epoch += 1 
self.losses.append(np.mean(self.loss)) self.loss = [] self.update_wnormz() self.update_gnormz() self.update_hist_list() for layer in self.layer_list: self.streaming_weight_history[layer] = {'weight': [], 'bias': []} self.streaming_gradient_history[layer] = {'weight': [], 'bias': []} def store_weight_and_grad_norms(self): """ Appends training history for summarization and visualization later. Scales each norm by the number of elements. Should be ran once per step per subnet. """ for layer in self.layer_list: self.streaming_weight_history[layer]['weight'].append(layer. weight.norm(self.norm_num).detach().cpu().numpy().take(0) / layer.weight.numel()) self.streaming_weight_history[layer]['bias'].append(layer.bias. norm(self.norm_num).detach().cpu().numpy().take(0) / layer. bias.numel()) self.streaming_gradient_history[layer]['weight'].append(layer. weight.grad.norm(self.norm_num).detach().cpu().numpy().take (0) / layer.weight.grad.numel()) self.streaming_gradient_history[layer]['bias'].append(layer. bias.grad.norm(self.norm_num).detach().cpu().numpy().take(0 ) / layer.bias.grad.numel()) def update_hist_list(self): """ Updates the histogram history based on the weights at the end of an epoch. Should be ran once per epoch per subnet. """ for layer in self.layer_list: self.histogram_weight_history[layer]['weight'].append(np. histogram(layer.weight.detach().cpu().numpy().reshape(-1), bins=self.bins)) self.histogram_weight_history[layer]['bias'].append(np. histogram(layer.bias.detach().cpu().numpy().reshape(-1), bins=self.bins)) if self.epoch == 0: self.histogram_gradient_history[layer]['weight'].append(None) self.histogram_gradient_history[layer]['bias'].append(None) else: self.histogram_gradient_history[layer]['weight'].append(np. histogram(layer.weight.grad.detach().cpu().numpy(). reshape(-1), bins=self.bins)) self.histogram_gradient_history[layer]['bias'].append(np. histogram(layer.bias.grad.detach().cpu().numpy(). reshape(-1), bins=self.bins)) def update_wnormz(self): """ Tracks history of desired norm of weights. Should be ran once per epoch per subnet. :param norm_num: 1 = l1 norm, 2 = l2 norm :return: list of norms of weights by layer, as well as overall weight norm """ total_norm = 0 for layer in self.wnorm_history: w_norm = np.linalg.norm(self.streaming_weight_history[layer][ 'weight'], self.norm_num) b_norm = np.linalg.norm(self.streaming_weight_history[layer][ 'bias'], self.norm_num) self.wnorm_history[layer]['weight'].append(w_norm) self.wnorm_history[layer]['bias'].append(b_norm) if self.norm_num == 1: total_norm += abs(w_norm) + abs(b_norm) else: total_norm += w_norm ** self.norm_num + b_norm ** self.norm_num total_norm = total_norm ** (1.0 / self.norm_num) self.wnorm_total_history.append(total_norm) def update_gnormz(self): """ Calculates gradient norms by layer as well as overall. Scales each norm by the number of elements. Should be ran once per epoch per subnet. :param norm_num: 1 = l1 norm, 2 = l2 norm :return: list of gradient norms by layer, as well as overall gradient norm """ total_norm = 0 for layer in self.gnorm_history: w_norm = np.linalg.norm(self.streaming_gradient_history[layer][ 'weight'], self.norm_num) / len(self. streaming_gradient_history[layer]['weight']) b_norm = np.linalg.norm(self.streaming_gradient_history[layer][ 'bias'], self.norm_num) / len(self. 
streaming_gradient_history[layer]['bias']) self.gnorm_history[layer]['weight'].append(w_norm) self.gnorm_history[layer]['bias'].append(b_norm) if self.norm_num == 1: total_norm += abs(w_norm) + abs(b_norm) else: total_norm += w_norm ** self.norm_num + b_norm ** self.norm_num total_norm = total_norm ** (1.0 / self.norm_num) / len(self. gnorm_history) self.gnorm_total_history.append(total_norm) def weights_init(self): """ Custom weights initialization for subnets Should only be run when first creating net. Will reset effects of training if run after training. """ for layer_name in self._modules: m = self._modules[layer_name] classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) nn.init.constant_(m.bias.data, 0) elif classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) nn.init.constant_(m.bias.data, 0) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) def plot_layer_scatters(self, figsize=(20, 10), show=True, save=None): """Plot weight and gradient norm history for each layer in layer_list across epochs""" assert self.epoch > 0, 'Model needs to be trained first' if save is None: save = self.path f, axes = plt.subplots(len(self.layer_list), 4, figsize=figsize, sharex=True) axes[0, 0].title.set_text('Weight Norms') axes[0, 1].title.set_text('Weight Gradient Norms') axes[0, 2].title.set_text('Bias Norms') axes[0, 3].title.set_text('Bias Gradient Norms') for i in range(4): axes[len(self.layer_list) - 1, i].set_xlabel('epochs') for i, layer in enumerate(self.layer_list): axes[i, 0].set_ylabel(self.layer_list_names[i]) axes[i, 0].plot(self.wnorm_history[layer]['weight']) axes[i, 1].plot(self.gnorm_history[layer]['weight']) axes[i, 2].plot(self.wnorm_history[layer]['bias']) axes[i, 3].plot(self.gnorm_history[layer]['bias']) sup = self.name + ' Layer Weight and Gradient Norms' st = f.suptitle(sup, fontsize='x-large') f.tight_layout() st.set_y(0.96) f.subplots_adjust(top=0.9) if show: f.show() if save: assert os.path.exists(save ), 'Check that the desired save path exists.' 
os.makedirs(os.path.join(save, 'layer_scatters'), exist_ok=True) f.savefig(os.path.join(save, 'layer_scatters', self.name + '_layer_scatters.png')) def plot_layer_hists(self, epoch=None, figsize=(20, 10), show=True, save=None): """Plots histograms of weight and gradients for each layer in layer_list at the desired epoch""" if epoch is None: epoch = self.epoch if save is None: save = self.path f, axes = plt.subplots(len(self.layer_list), 4, figsize=figsize, sharex=False) axes[0, 0].title.set_text('Weight Histograms') axes[0, 1].title.set_text('Weight Gradient Histograms') axes[0, 2].title.set_text('Bias Histograms') axes[0, 3].title.set_text('Bias Gradient Histograms') for i in range(4): axes[len(self.layer_list) - 1, i].set_xlabel('Value') for i, layer in enumerate(self.layer_list): axes[i, 0].set_ylabel(self.layer_list_names[i]) plt.sca(axes[i, 0]) uu.convert_np_hist_to_plot(self.histogram_weight_history[layer] ['weight'][epoch]) plt.sca(axes[i, 2]) uu.convert_np_hist_to_plot(self.histogram_weight_history[layer] ['bias'][epoch]) if epoch == 0: pass else: plt.sca(axes[i, 1]) uu.convert_np_hist_to_plot(self.histogram_gradient_history[ layer]['weight'][epoch]) plt.sca(axes[i, 3]) uu.convert_np_hist_to_plot(self.histogram_gradient_history[ layer]['bias'][epoch]) sup = (self.name + ' Layer Weight and Gradient Histograms - Epoch ' + str(epoch)) st = f.suptitle(sup, fontsize='x-large') f.tight_layout() st.set_y(0.96) f.subplots_adjust(top=0.9) if show: f.show() if save: assert os.path.exists(save ), 'Check that the desired save path exists.' os.makedirs(os.path.join(save, 'layer_histograms'), exist_ok=True) f.savefig(os.path.join(save, 'layer_histograms', self.name + '_epoch_' + str(epoch) + '_layer_histograms.png')) def build_hist_gif(self, path=None, start=0, stop=None, freq=1, fps=5, final_img_frames=20): """ Loop through self.histogram_weight_history and saves the images to a folder. :param path: Path to folder to save images. Folder will be created if it does not already exist. :param start: Epoch to start gif on. Default 0. :param stop: Epoch to end gif on. Default self.epoch (number of epochs trained so far). :param freq: Interval of skipping epochs. Defaults to 1 (no skipping). :param fps: Number of frames to display per second in gif. Defaults to 5. :param final_img_frames: Number of times to repeat final image of gif before it will restart. Defaults to 20 (4 seconds with 5 fps). 
:return: Saves a gif with the title net + _histogram_generation_animation.gif (as well as the images comprising the gif into the layer_histograms folder) """ assert len(self.histogram_weight_history[self.layer_list[0]]['weight'] ) > 1, 'Model not yet trained' if path is None: path = self.path if stop is None: stop = self.epoch ims = [] for epoch in range(start, stop + freq, freq): self.plot_layer_hists(epoch=epoch, show=False, save=path) img_name = os.path.join(path, 'layer_histograms', self.name + '_epoch_' + str(epoch) + '_layer_histograms.png') ims.append(imageio.imread(img_name)) plt.close() if epoch == stop + freq: for i in range(final_img_frames): ims.append(imageio.imread(img_name)) plt.close() imageio.mimsave(os.path.join(path, self.name + '_histogram_generation_animation.gif'), ims, fps=fps) @torch.utils.hooks.unserializable_hook def activations_hook(self, grad): """ Used for Grad CAM Hook for the gradients of the activations Used on the final convolutional layer """ self.gradients = grad def get_activations_gradient(self): """Grad CAM Helper Function""" return self.gradients def get_activations(self): """Grad CAM Helper Function""" return self.final_conv_output class TabularNetDNew(nn.Module, NetUtils): def __init__(self, device, H, out_dim, nc, noise, lr=0.0002, beta1=0.5, beta2=0.999, wd=0): super().__init__() NetUtils.__init__(self) self.name = 'Discriminator' self.device = device self.loss_real = None self.loss_fake = None self.noise = GaussianNoise(device=self.device, sigma=noise) self.fc1 = nn.Linear(out_dim + nc, H, bias=True) self.output = nn.Linear(H, 1, bias=True) self.act = nn.LeakyReLU(0.2) self.m = nn.Sigmoid() self.loss_fn = nn.BCELoss() self.opt = optim.Adam(self.parameters(), lr=lr, betas=(beta1, beta2 ), weight_decay=wd) self.init_layer_list() self.init_history() self.update_hist_list() self.D_x = [] self.Avg_D_reals = [] self.D_G_z1 = [] self.Avg_D_fakes = [] self.weights_init() def train_one_step_real(self, output, label): self.zero_grad() self.loss_real = self.loss_fn(output, label) self.loss_real.backward() self.D_x.append(output.mean().item()) def train_one_step_fake(self, output, label): self.loss_fake = self.loss_fn(output, label) self.loss_fake.backward() self.D_G_z1.append(output.mean().item()) def combine_and_update_opt(self): self.loss.append(self.loss_real.item() + self.loss_fake.item()) self.opt.step() self.store_weight_and_grad_norms() def next_epoch_discrim(self): """Discriminator specific actions""" self.Avg_D_reals.append(np.mean(self.D_x)) self.D_x = [] self.Avg_D_fakes.append(np.mean(self.D_G_z1)) self.D_G_z1 = [] def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.output.weight primals_6 = self.output.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Atrus619/CSDGAN
TabularNetD
false
5,223
[ "MIT" ]
1
712be213e59b32a79a4970684d726af63616edaf
https://github.com/Atrus619/CSDGAN/tree/712be213e59b32a79a4970684d726af63616edaf
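A minimal sketch of how the discriminator above is meant to be stepped inside an external conditional-GAN loop; the random `fake_rows` stand in for generator output and the label tensors are placeholders, not part of the original entry:

import torch

# Hypothetical single discriminator step; shapes follow get_inputs() above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
netD = TabularNetD(device=device, H=4, out_dim=4, nc=4, noise=0.1).to(device)

real_rows = torch.rand(4, 4, device=device)
labels = torch.rand(4, 4, device=device)
fake_rows = torch.rand(4, 4, device=device)  # placeholder for generator output

real_label = torch.full((4, 1), 1.0, device=device)
fake_label = torch.full((4, 1), 0.0, device=device)

# Real batch (zeroes grads and backprops), then fake batch, then one opt.step().
netD.train_one_step_real(netD(real_rows, labels), real_label)
netD.train_one_step_fake(netD(fake_rows, labels), fake_label)
netD.combine_and_update_opt()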
GradientLoss
import torch import torch.nn as nn class GradientLoss(nn.Module): """ L1 loss on the gradient of the picture """ def __init__(self): super(GradientLoss, self).__init__() def forward(self, a): gradient_a_x = torch.abs(a[:, :, :, :-1] - a[:, :, :, 1:]) gradient_a_y = torch.abs(a[:, :, :-1, :] - a[:, :, 1:, :]) return torch.mean(gradient_a_x) + torch.mean(gradient_a_y) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_mean_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 3 r1 = rindex // 3 r2 = rindex % 12 r3 = rindex // 12 tmp0 = tl.load(in_ptr0 + (r0 + 4 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (1 + r0 + 4 * r1), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (r2 + 16 * r3), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (4 + r2 + 16 * r3), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tl_math.abs(tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 192.0 tmp17 = tmp7 / tmp16 tmp18 = tmp15 / tmp16 tmp19 = tmp17 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_mean_sub_0[grid(1)](buf2, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class GradientLossNew(nn.Module): """ L1 loss on the gradient of the picture """ def __init__(self): super(GradientLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
GuYuanjie/DeepFusionPrior
GradientLoss
false
5,224
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
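Because the loss above is the mean absolute forward difference along width plus the same along height, a ramp image with a known slope gives an exact expected value; a small sanity-check sketch using the GradientLoss module defined above:

import torch

# Ramp that rises by 0.25 per pixel along the width and is flat along the height,
# so the horizontal term is 0.25 and the vertical term is 0.
ramp = torch.arange(4, dtype=torch.float32) * 0.25
x = ramp.view(1, 1, 1, 4).expand(4, 4, 4, 4).contiguous()

loss = GradientLoss()(x)
assert torch.isclose(loss, torch.tensor(0.25))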
ScaledDotProductAttention
import torch import numpy as np import torch.nn as nn import torch.utils.data import torch.nn class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 2.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6.to(tl.float64) tmp21 = tmp20 * tmp1 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 
16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_4, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(64, 4)](buf1, primals_6, buf4, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_sqrt_2[grid(256)](buf5, buf6, 256, XBLOCK =256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_clone_0[grid(256)](buf2, primals_8, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), primals_10, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttentionNew(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttentionNew, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, input_0, input_1, input_2): primals_3 = self.fc_q.weight primals_4 = self.fc_q.bias primals_5 = self.fc_k.weight primals_6 = self.fc_k.bias primals_7 = self.fc_v.weight primals_8 = self.fc_v.bias primals_10 = self.fc_o.weight primals_11 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
GavinGuan95/Generative-VQA
ScaledDotProductAttention
false
5,225
[ "MIT" ]
1
0912e3a2426809ef4d4eb40bae667b31c2269161
https://github.com/GavinGuan95/Generative-VQA/tree/0912e3a2426809ef4d4eb40bae667b31c2269161
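A short usage sketch for the module above; shapes mirror get_inputs(), and the boolean mask follows the forward() docstring convention that True marks positions to exclude:

import torch

attn = ScaledDotProductAttention(d_model=4, d_k=4, d_v=4, h=4)

queries = torch.rand(4, 4, 4)  # (b_s, nq, d_model)
keys = torch.rand(4, 4, 4)     # (b_s, nk, d_model)
values = torch.rand(4, 4, 4)   # (b_s, nk, d_model)

out = attn(queries, keys, values)
assert out.shape == (4, 4, 4)  # back to (b_s, nq, d_model) after fc_o

# Hide the last key from every query: masked positions are filled with -inf
# before the softmax, so they receive zero attention weight.
mask = torch.zeros(4, 4, 4, 4, dtype=torch.bool)  # (b_s, h, nq, nk)
mask[..., -1] = True
out_masked = attn(queries, keys, values, attention_mask=mask)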
ScaledDotProductAttentionMemory
import torch import numpy as np import torch.nn as nn import torch.utils.data import torch.nn class ScaledDotProductAttentionMemory(nn.Module): """ Scaled dot-product attention with memory """ def __init__(self, d_model, d_k, d_v, h, m): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads :param m: Number of memory slots """ super(ScaledDotProductAttentionMemory, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k)) self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v)) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.m = m self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.normal_(self.m_k, 0, 1 / self.d_k) nn.init.normal_(self.m_v, 0, 1 / self.m) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] m_k = np.sqrt(self.d_k) * self.m_k.expand(b_s, self.m, self.h * self.d_k) m_v = np.sqrt(self.m) * self.m_v.expand(b_s, self.m, self.h * self.d_v) q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = torch.cat([self.fc_k(keys), m_k], 1).view(b_s, nk + self.m, self.h, self.d_k).permute(0, 2, 3, 1) v = torch.cat([self.fc_v(values), m_v], 1).view(b_s, nk + self.m, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = torch.cat([att[:, :, :, :nk] * attention_weights, att[:, :, :, nk:]], -1) if attention_mask is not None: att[:, :, :, :nk] = att[:, :, :, :nk].masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4, 'm': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 16 * x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = 2.0 tmp11 = tmp10 * tmp9 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_per_fused__softmax_sqrt_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 2.0, tl.float64) tmp2 = tl.full([1, 1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, float('-inf')) tmp11 = triton_helpers.max2(tmp10, 1)[:, None] tmp12 = tmp7 - tmp11 tmp13 = tmp6.to(tl.float64) tmp14 = tmp13 * tmp1 tmp15 = tmp14.to(tl.float32) tmp16 = tmp12 / tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp17 / tmp21 tl.store(out_ptr2 + (r1 + 8 * x0), tmp22, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 % 4 x3 = xindex // 128 x4 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = 
tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * (-4 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = 2.0 tmp11 = tmp10 * tmp9 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 16), (64, 16, 1)) assert_size_stride(primals_4, (1, 4, 16), (64, 16, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf0) del primals_5 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_7 del primals_8 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(primals_11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_10 del primals_9 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_clone_1[grid(512)](buf1, primals_3, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 8), (32, 8, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 8), (32, 8, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_per_fused__softmax_sqrt_2[grid(64)](buf5, buf8, 64, 8, XBLOCK=8, num_warps=2, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 8, 4), (128, 32, 4, 1), 0) del buf5 triton_poi_fused_clone_3[grid(512)](buf2, primals_4, buf9, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf10 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 8), (32, 8, 1), 0), reinterpret_tensor(buf9, (16, 8, 4), (32, 4, 1), 0), out=buf10) buf11 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 
triton_poi_fused_clone_4[grid(256)](buf10, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf11, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf12) del primals_13 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_11, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (16, 16), (16, 1), 0 ), primals_12, reinterpret_tensor(buf9, (16, 4, 8), (32, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 8, 4), (32, 1, 8), 0) class ScaledDotProductAttentionMemoryNew(nn.Module): """ Scaled dot-product attention with memory """ def __init__(self, d_model, d_k, d_v, h, m): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads :param m: Number of memory slots """ super(ScaledDotProductAttentionMemoryNew, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k)) self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v)) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.m = m self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.normal_(self.m_k, 0, 1 / self.d_k) nn.init.normal_(self.m_v, 0, 1 / self.m) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, input_0, input_1, input_2): primals_3 = self.m_k primals_4 = self.m_v primals_5 = self.fc_q.weight primals_6 = self.fc_q.bias primals_7 = self.fc_k.weight primals_8 = self.fc_k.bias primals_9 = self.fc_v.weight primals_10 = self.fc_v.bias primals_12 = self.fc_o.weight primals_13 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_11 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
GavinGuan95/Generative-VQA
ScaledDotProductAttentionMemory
false
5,226
[ "MIT" ]
1
0912e3a2426809ef4d4eb40bae667b31c2269161
https://github.com/GavinGuan95/Generative-VQA/tree/0912e3a2426809ef4d4eb40bae667b31c2269161
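The memory variant above behaves like plain scaled dot-product attention but concatenates m learned slots (scaled by sqrt(d_k) and sqrt(m)) onto the keys and values, so each query attends over nk + m positions while the output shape is unchanged; a brief sketch:

import torch

attn = ScaledDotProductAttentionMemory(d_model=4, d_k=4, d_v=4, h=4, m=4)

q = torch.rand(4, 4, 4)  # (b_s, nq, d_model)
k = torch.rand(4, 4, 4)
v = torch.rand(4, 4, 4)

out = attn(q, k, v)
assert out.shape == (4, 4, 4)

# attention_weights rescale only the nk real keys, matching the
# att[:, :, :, :nk] slice in forward(); the m memory slots are untouched.
w = torch.rand(4, 4, 4, 4)  # (b_s, h, nq, nk)
out_weighted = attn(q, k, v, attention_weights=w)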
VarianceLayer
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class VarianceLayer(nn.Module): def __init__(self, patch_size=5, channels=1): self.patch_size = patch_size super(VarianceLayer, self).__init__() mean_mask = np.ones((channels, channels, patch_size, patch_size)) / ( patch_size * patch_size) self.mean_mask = nn.Parameter(data=torch.FloatTensor(mean_mask), requires_grad=False) mask = np.zeros((channels, channels, patch_size, patch_size)) mask[:, :, patch_size // 2, patch_size // 2] = 1.0 self.ones_mask = nn.Parameter(data=torch.FloatTensor(mask), requires_grad=False) def forward(self, x): Ex_E = F.conv2d(x, self.ones_mask) - F.conv2d(x, self.mean_mask) return F.conv2d(Ex_E ** 2, self.mean_mask) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 14400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (1, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(arg2_1, (1, 1, 5, 5), (25, 25, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg0_1 buf1 = extern_kernels.convolution(arg1_1, arg2_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg1_1 buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_pow_sub_0[grid(14400)](buf2, buf1, 14400, XBLOCK= 128, num_warps=4, num_stages=1) del buf1 buf3 = extern_kernels.convolution(buf2, arg2_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 56, 56), (3136, 3136, 56, 1)) del arg2_1 del buf2 return buf3, class VarianceLayerNew(nn.Module): def __init__(self, patch_size=5, channels=1): self.patch_size = patch_size super(VarianceLayerNew, self).__init__() mean_mask = np.ones((channels, channels, patch_size, patch_size)) / ( patch_size * patch_size) self.mean_mask = nn.Parameter(data=torch.FloatTensor(mean_mask), requires_grad=False) mask = np.zeros((channels, channels, patch_size, patch_size)) mask[:, :, patch_size // 2, patch_size // 2] = 1.0 self.ones_mask = nn.Parameter(data=torch.FloatTensor(mask), requires_grad=False) def forward(self, input_0): arg0_1 = self.mean_mask arg2_1 = self.ones_mask arg1_1 = input_0 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
GuYuanjie/DeepFusionPrior
VarianceLayer
false
5,227
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
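The layer above is built from two fixed 5x5 "valid" convolutions: ones_mask reads the patch centre, mean_mask the patch average, and their squared difference is averaged again, so a 64x64 input shrinks to 56x56. A small sketch of the expected behaviour:

import torch

var_layer = VarianceLayer(patch_size=5, channels=1)

x = torch.rand(4, 1, 64, 64)
assert var_layer(x).shape == (4, 1, 56, 56)  # 64 -> 60 -> 56 after two valid 5x5 convs

# A constant image never deviates from its local mean, so the map is ~0.
flat = torch.ones(1, 1, 64, 64)
assert var_layer(flat).abs().max() < 1e-6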
ROUGH_FILTER
import torch import torch.nn as nn class ROUGH_FILTER(nn.Module): def __init__(self, user_num, embedding_size): super(ROUGH_FILTER, self).__init__() self.in_user_embedding = nn.Embedding(user_num, embedding_size) def forward(self, out_user_embedding_weight): score = torch.mm(self.in_user_embedding.weight, out_user_embedding_weight.permute(1, 0)) score = torch.tanh(score) score = torch.relu(score) return score def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'user_num': 4, 'embedding_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_tanh_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = 0.0 tmp5 = tmp3 <= tmp4 tl.store(out_ptr0 + x0, tmp3, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_tanh_threshold_backward_0[grid(16)](buf0, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, buf0, buf2, primals_2 class ROUGH_FILTERNew(nn.Module): def __init__(self, user_num, embedding_size): super(ROUGH_FILTERNew, self).__init__() self.in_user_embedding = nn.Embedding(user_num, embedding_size) def forward(self, input_0): primals_1 = self.in_user_embedding.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
GSL4Rec/GSL4Rec
ROUGH_FILTER
false
5,228
[ "Apache-2.0" ]
1
9cf8964957a6d9962bef42bd4908b4f10ef0771c
https://github.com/GSL4Rec/GSL4Rec/tree/9cf8964957a6d9962bef42bd4908b4f10ef0771c
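A brief sketch of the module above: the score is relu(tanh(.)) of the inner products between the learned in-embeddings and the supplied out-embeddings, so non-positive similarities are zeroed and the rest are squashed towards 1:

import torch

rf = ROUGH_FILTER(user_num=4, embedding_size=4)
out_emb = torch.rand(4, 4)

score = rf(out_emb)
assert score.shape == (4, 4)
assert (score >= 0).all() and (score <= 1).all()  # relu(tanh(z)) lies in [0, 1)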
GrayscaleLayer
import torch import torch.nn as nn class GrayscaleLayer(nn.Module): def __init__(self): super(GrayscaleLayer, self).__init__() def forward(self, x): return torch.mean(x, 1, keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class GrayscaleLayerNew(nn.Module): def __init__(self): super(GrayscaleLayerNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
GuYuanjie/DeepFusionPrior
GrayscaleLayer
false
5,229
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
SpatialGC
import torch import torch.nn as nn class SpatialGC(nn.Module): """Sapatial Graph Convolution used in DR-GCB and RAM_r's encoder and decoder Args: in_channels (int): Number of channels in the input sequence data out_channels (int): Number of channels produced by the convolution kernel_size (int): Size of the graph convolving kernel bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True`` Shape: - Input[0]: Input graph sequence in :math:`(N, M, in_channels, T_{in}, V)` format - Input[1]: Input physical graph adjacency matrix in :math:`(K, V, V)` format - Output[0]: Output physical graph sequence in :math:`(N, M, out_channels, T_{out}, V)` format - Output[1]: Physical graph adjacency matrix for output data in :math:`(K, V, V)` format where :math:`N` is a batch size, :math:`M` is the number of instance in a frame. :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, :math:`T_{in}/T_{out}` is a length of input/output sequence, :math:`V` is the number of graph nodes. """ def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(in_channels, out_channels * kernel_size, kernel_size=(1, 1), bias=bias) def forward(self, x, A): assert A.size(0) == self.kernel_size x = self.conv(x) n, kc, t, v = x.size() x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v) x = torch.einsum('nkctv,kvw->nctw', (x, A)) return x.contiguous(), A def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x4 = xindex // 256 x5 = xindex // 16 % 16 x3 = xindex // 64 % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x5 + 64 * x1 + 256 * x4), xmask) tmp1 = tl.load(in_ptr1 + (x3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x6, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_4, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1 ), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(1024)](buf0, primals_3, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 buf2 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (1, 64, 16), (0, 16, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0), out=buf2) del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0) class SpatialGCNew(nn.Module): """Sapatial Graph Convolution used in DR-GCB and RAM_r's encoder and decoder Args: in_channels (int): Number of channels in the input sequence data out_channels (int): Number of channels produced by the convolution kernel_size (int): Size of the graph convolving kernel bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True`` Shape: - Input[0]: Input graph sequence in :math:`(N, M, in_channels, T_{in}, V)` format - Input[1]: Input physical graph adjacency matrix in :math:`(K, V, V)` format - Output[0]: Output physical graph sequence in :math:`(N, M, out_channels, T_{out}, V)` format - Output[1]: Physical graph adjacency matrix for output data in :math:`(K, V, V)` format where :math:`N` is a batch size, :math:`M` is the number of instance in a frame. :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, :math:`T_{in}/T_{out}` is a length of input/output sequence, :math:`V` is the number of graph nodes. 
""" def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(in_channels, out_channels * kernel_size, kernel_size=(1, 1), bias=bias) def forward(self, input_0, input_1): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_4 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
GlenGGG/DR-GCN
SpatialGC
false
5,230
[ "Apache-2.0" ]
1
540e2ede803f78b87b862aa26d099fbc02173143
https://github.com/GlenGGG/DR-GCN/tree/540e2ede803f78b87b862aa26d099fbc02173143
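The einsum 'nkctv,kvw->nctw' in the forward() above is the usual spatial graph aggregation: each of the K kernel slices mixes the node axis with its own adjacency matrix, and the K partial results are summed. A sketch of that equivalence with stand-alone tensors (the dimensions chosen here are arbitrary):

import torch

N, K, C, T, V = 2, 4, 3, 5, 6
x = torch.rand(N, K, C, T, V)  # conv output reshaped to (n, kernel_size, c, t, v)
A = torch.rand(K, V, V)        # stacked adjacency matrices

einsum_out = torch.einsum('nkctv,kvw->nctw', (x, A))

# Explicit form: one matmul over the node axis per kernel slice, summed over k.
loop_out = sum(x[:, k] @ A[k] for k in range(K))

assert torch.allclose(einsum_out, loop_out, atol=1e-5)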
GrayscaleLoss
import torch import torch.nn as nn class GrayscaleLayer(nn.Module): def __init__(self): super(GrayscaleLayer, self).__init__() def forward(self, x): return torch.mean(x, 1, keepdim=True) class GrayscaleLoss(nn.Module): def __init__(self): super(GrayscaleLoss, self).__init__() self.gray_scale = GrayscaleLayer() self.mse = nn.MSELoss() def forward(self, x, y): x_g = self.gray_scale(x) y_g = self.gray_scale(y) return self.mse(x_g, y_g) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp22 = 64.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mse_loss_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class GrayscaleLayer(nn.Module): def __init__(self): super(GrayscaleLayer, self).__init__() def forward(self, x): return torch.mean(x, 1, keepdim=True) class GrayscaleLossNew(nn.Module): def __init__(self): super(GrayscaleLossNew, self).__init__() self.gray_scale = GrayscaleLayer() self.mse = nn.MSELoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
GuYuanjie/DeepFusionPrior
GrayscaleLoss
false
5,231
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
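A minimal equivalence sketch for the GrayscaleLoss pair above (an illustration, not part of the dataset row): it assumes a CUDA device and that the GrayscaleLoss, GrayscaleLossNew and get_inputs definitions from this row are in scope.

import torch


def check_grayscale_loss():
    # Hypothetical smoke test; assumes CUDA and the definitions above.
    torch.manual_seed(0)
    x, y = [t.cuda() for t in get_inputs()]
    ref = GrayscaleLoss()(x, y)       # eager PyTorch path
    opt = GrayscaleLossNew()(x, y)    # fused Triton path
    assert torch.allclose(ref, opt, atol=1e-6)
    return ref.item()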
VectorQuantizer
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F


class VectorQuantizer(nn.Module):
    """
    Reference:
    [1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
    """

    def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta:
        'float'=0.25):
        super(VectorQuantizer, self).__init__()
        self.K = num_embeddings
        self.D = embedding_dim
        self.beta = beta
        self.embedding = nn.Embedding(self.K, self.D)
        self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)

    def forward(self, latents: 'Tensor') ->Tensor:
        latents = latents.permute(0, 2, 3, 1).contiguous()
        latents_shape = latents.shape
        flat_latents = latents.view(-1, self.D)
        dist = torch.sum(flat_latents ** 2, dim=1, keepdim=True) + torch.sum(
            self.embedding.weight ** 2, dim=1) - 2 * torch.matmul(
            flat_latents, self.embedding.weight.t())
        encoding_inds = torch.argmin(dist, dim=1).unsqueeze(1)
        device = latents.device
        encoding_one_hot = torch.zeros(encoding_inds.size(0), self.K,
            device=device)
        encoding_one_hot.scatter_(1, encoding_inds, 1)
        quantized_latents = torch.matmul(encoding_one_hot, self.embedding.
            weight)
        quantized_latents = quantized_latents.view(latents_shape)
        commitment_loss = F.mse_loss(quantized_latents.detach(), latents)
        embedding_loss = F.mse_loss(quantized_latents, latents.detach())
        vq_loss = commitment_loss * self.beta + embedding_loss
        quantized_latents = latents + (quantized_latents - latents).detach()
        return quantized_latents.permute(0, 3, 1, 2).contiguous(), vq_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_embeddings': 4, 'embedding_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_view_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tl.store(in_out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_argmin_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 < tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 < tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 
tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 < tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_scatter_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_per_fused_add_clone_mse_loss_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r1 = rindex // 4 % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr1 + (r1 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 0.25 tmp10 = tmp8 * tmp9 tmp11 = tmp10 + tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) @triton.jit def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp2 = tmp1 - tmp0 tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + (x2 + 16 * y3), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_clone_mse_loss_mse_loss_backward_6(in_out_ptr0, in_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 0.0078125 tmp4 = tmp2 * tmp3 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_view_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = 
empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf1) buf2 = buf1 del buf1 triton_poi_fused_add_mul_pow_sub_sum_1[grid(256)](buf2, buf0, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_argmin_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused_scatter_3[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 buf5 = buf0 del buf0 extern_kernels.mm(buf4, primals_2, out=buf5) del primals_2 buf6 = empty_strided_cuda((), (), torch.float32) buf9 = buf6 del buf6 triton_per_fused_add_clone_mse_loss_mul_4[grid(1)](buf9, buf5, primals_1, 1, 256, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(16, 16)](primals_1, buf5, buf7, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_clone_mse_loss_mse_loss_backward_6[grid(64, 4)](buf8, primals_1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 return buf7, buf9, buf8, reinterpret_tensor(buf4, (4, 64), (1, 4), 0) class VectorQuantizerNew(nn.Module): """ Reference: [1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py """ def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta: 'float'=0.25): super(VectorQuantizerNew, self).__init__() self.K = num_embeddings self.D = embedding_dim self.beta = beta self.embedding = nn.Embedding(self.K, self.D) self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K) def forward(self, input_0): primals_2 = self.embedding.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
GilesLuo/PyTorch-VAE
VectorQuantizer
false
5,232
[ "Apache-2.0" ]
1
dab984c7eb1915be9e7cfa7bfa176ad72f7e7a2f
https://github.com/GilesLuo/PyTorch-VAE/tree/dab984c7eb1915be9e7cfa7bfa176ad72f7e7a2f
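A hedged usage sketch for the VectorQuantizer row above (an illustration, not part of the dataset row): it assumes a CUDA device and the VectorQuantizer definition from this row.

import torch


def check_vector_quantizer():
    # Hypothetical sketch; the output keeps the input shape, the loss is a
    # scalar, and the straight-through estimator passes gradients back to
    # the latents unchanged.
    torch.manual_seed(0)
    latents = torch.rand(4, 4, 4, 4, device='cuda', requires_grad=True)
    vq = VectorQuantizer(num_embeddings=4, embedding_dim=4).cuda()
    quantized, vq_loss = vq(latents)
    assert quantized.shape == latents.shape and vq_loss.dim() == 0
    quantized.sum().backward()
    assert torch.allclose(latents.grad, torch.ones_like(latents))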
ResBlock
import torch


class ResBlock(torch.nn.Module):

    def __init__(self, num_channel):
        super(ResBlock, self).__init__()
        self.conv1 = torch.nn.Conv2d(num_channel, num_channel, kernel_size=
            3, stride=1, padding=1)
        self.conv2 = torch.nn.Conv2d(num_channel, num_channel, kernel_size=
            3, stride=1, padding=1)
        self.leaky_relu = torch.nn.LeakyReLU()

    def forward(self, x):
        out = x
        out = self.leaky_relu(out)
        out = self.conv1(out)
        out = self.leaky_relu(out)
        out = self.conv2(out)
        return out + x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_2[grid(256)](buf5, primals_5, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf5, primals_2, primals_4, buf0, buf2, buf3 class ResBlockNew(torch.nn.Module): def __init__(self, num_channel): super(ResBlockNew, self).__init__() self.conv1 = torch.nn.Conv2d(num_channel, num_channel, kernel_size= 3, stride=1, padding=1) 
self.conv2 = torch.nn.Conv2d(num_channel, num_channel, kernel_size= 3, stride=1, padding=1) self.leaky_relu = torch.nn.LeakyReLU() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Gregory-Eales/mban
ResBlock
false
5,233
[ "Apache-2.0" ]
1
d8b35db51c7e601b1db777d9a80343600374250b
https://github.com/Gregory-Eales/mban/tree/d8b35db51c7e601b1db777d9a80343600374250b
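A hedged equivalence sketch for the ResBlock pair above (an illustration, not part of the dataset row): it assumes a CUDA device and the ResBlock / ResBlockNew definitions from this row.

import torch


def check_res_block():
    # Hypothetical check: copy the conv weights so both paths are comparable.
    torch.manual_seed(0)
    ref = ResBlock(num_channel=4).cuda()
    opt = ResBlockNew(num_channel=4).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-5)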
MultiHeadAttention
import math
import torch
import torch.nn as nn


def dot_scaled_attention(query: 'torch.Tensor', key: 'torch.Tensor', value:
    'torch.Tensor'):
    """ Dot scaled attention
    Implement dot-product scaled attention which takes query, key, value
    and gives attention scores.

    Arguments:
    query -- Query tensor in shape (sequence_length, batch_size, d_k)
    key -- Key tensor in shape (sequence_length, batch_size, d_k)
    value -- Value tensor in shape (sequence_length, batch_size, d_k)
    padding_mask -- Padding mask tensor in torch.bool type
        in shape (sequence_length, batch_size)
        True for <PAD>, False for non-<PAD>

    Returns:
    attention -- Attention result tensor in shape (sequence_length, batch_size, d_k)
    """
    assert query.shape == key.shape == value.shape
    query_shape = query.shape
    _seq_len, _, d_k = query_shape
    QK_t_scaled = torch.bmm(key.permute(1, 0, 2), query.permute(1, 2, 0)
        ) / math.sqrt(d_k)
    distribution = nn.functional.softmax(QK_t_scaled, dim=1)
    attention = torch.bmm(value.permute(1, 2, 0), distribution).permute(2, 0, 1)
    assert attention.shape == query_shape
    return attention, distribution


class MultiHeadAttention(nn.Module):

    def __init__(self, hidden_dim: 'int', n_head: 'int'=4):
        """ Multi-head attention initializer
        Use below attributes to implement the forward function

        Attributes:
        n_head -- the number of heads
        d_k -- Hidden dimension of the dot scaled attention
        V_linear -- Linear function to project hidden_dim of value to d_k
        K_linear -- Linear function to project hidden_dim of key to d_k
        Q_linear -- Linear function to project hidden_dim of query to d_k
        O_linear -- Linear function to project collections of d_k to hidden_dim
        """
        super().__init__()
        assert hidden_dim % n_head == 0
        self.n_head = n_head
        self.d_k = hidden_dim // n_head
        self.V_linear = nn.Linear(hidden_dim, self.n_head * self.d_k, bias=False)
        self.K_linear = nn.Linear(hidden_dim, self.n_head * self.d_k, bias=False)
        self.Q_linear = nn.Linear(hidden_dim, self.n_head * self.d_k, bias=False)
        self.O_linear = nn.Linear(self.n_head * self.d_k, hidden_dim, bias=False)

    def forward(self, value: 'torch.Tensor', key: 'torch.Tensor', query:
        'torch.Tensor'):
        """ Multi-head attention forward function
        Implement multi-head attention which takes value, key, query, and
        gives attention score. Use dot-scaled attention you have implemented above.

        Note: If you adjust the dimension of batch_size dynamically,
        you can implement this function without any iteration.

        Parameters:
        value -- Value tensor in shape (sequence_length, batch_size, hidden_dim)
        key -- Key tensor in shape (sequence_length, batch_size, hidden_dim)
        query -- Query tensor in shape (sequence_length, batch_size, hidden_dim)

        Returns:
        attention -- Attention result tensor in shape (sequence_length, batch_size, hidden_dim)
        """
        assert value.shape == key.shape == query.shape
        input_shape = value.shape
        _seq_length, batch_size, _hidden_dim = input_shape
        Q_embed_concat = torch.cat(self.Q_linear(query.permute(1, 0, 2)).
            split(self.d_k, dim=2), 0).permute(1, 0, 2)
        K_embed_concat = torch.cat(self.K_linear(key.permute(1, 0, 2)).
            split(self.d_k, dim=2), 0).permute(1, 0, 2)
        V_embed_concat = torch.cat(self.V_linear(value.permute(1, 0, 2)).
            split(self.d_k, dim=2), 0).permute(1, 0, 2)
        attention_stacked, distribution = dot_scaled_attention(query=
            Q_embed_concat, key=K_embed_concat, value=V_embed_concat)
        attention = self.O_linear(torch.cat(attention_stacked.permute(1, 0,
            2).split(batch_size, dim=0), 2)).permute(1, 0, 2)
        assert attention.shape == input_shape
        return attention, distribution


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = 
tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x1), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (32 + x1), tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr0 + (48 + x1), tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_3, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](primals_1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5) del primals_6 buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_cat_1[grid(64)](buf3, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) del buf3 triton_poi_fused_cat_1[grid(64)](buf1, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), 
torch.float32) triton_poi_fused__softmax_2[grid(256)](buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = buf8 del buf8 triton_poi_fused__softmax_3[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 triton_poi_fused_cat_1[grid(64)](buf5, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf5, (16, 1, 4), (4, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 4), (4, 0, 1), 0), buf10, out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_cat_4[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0) del buf12 extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf14) return reinterpret_tensor(buf14, (4, 4, 4), (4, 16, 1), 0 ), buf10, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor( buf4, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), primals_7, buf11, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), buf7 def dot_scaled_attention(query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor'): """ Dot scaled attention Implement dot-product scaled attention which takes query, key, value and gives attention scores. Arguments: query -- Query tensor in shape (sequence_length, batch_size, d_k) key -- Key tensor in shape (sequence_length, batch_size, d_k) value -- Value tensor in shape (sequence_length, batch_size, d_k) padding_mask -- Padding mask tensor in torch.bool type in shape (sequence_length, batch_size) True for <PAD>, False for non-<PAD> Returns: attention -- Attention result tensor in shape (sequence_length, batch_size, d_k) """ assert query.shape == key.shape == value.shape query_shape = query.shape _seq_len, _, d_k = query_shape QK_t_scaled = torch.bmm(key.permute(1, 0, 2), query.permute(1, 2, 0) ) / math.sqrt(d_k) distribution = nn.functional.softmax(QK_t_scaled, dim=1) attention = torch.bmm(value.permute(1, 2, 0), distribution).permute(2, 0, 1 ) assert attention.shape == query_shape return attention, distribution class MultiHeadAttentionNew(nn.Module): def __init__(self, hidden_dim: 'int', n_head: 'int'=4): """ Multi-head attention initializer Use below attributes to implement the forward function Attributes: n_head -- the number of heads d_k -- Hidden dimension of the dot scaled attention V_linear -- Linear function to project hidden_dim of value to d_k K_linear -- Linear function to project hidden_dim of key to d_k Q_linear -- Linear function to project hidden_dim of query to d_k O_linear -- Linear function to project collections of d_k to hidden_dim """ super().__init__() assert hidden_dim % n_head == 0 self.n_head = n_head self.d_k = hidden_dim // n_head self.V_linear = nn.Linear(hidden_dim, self.n_head * self.d_k, bias= False) self.K_linear = nn.Linear(hidden_dim, self.n_head * self.d_k, bias= False) self.Q_linear = nn.Linear(hidden_dim, self.n_head * self.d_k, bias= False) self.O_linear = nn.Linear(self.n_head * self.d_k, hidden_dim, bias= False) def forward(self, input_0, input_1, input_2): primals_4 = self.V_linear.weight primals_5 = self.K_linear.weight primals_6 = self.Q_linear.weight primals_7 = self.O_linear.weight primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7]) return output[0], output[1]
Giseung-Park/BlockSeq
MultiHeadAttention
false
5,234
[ "MIT" ]
1
73dd55e6e500c765396fb7bcb514c9cbe7d799ac
https://github.com/Giseung-Park/BlockSeq/tree/73dd55e6e500c765396fb7bcb514c9cbe7d799ac
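A hedged shape check for the MultiHeadAttention row above (an illustration, not part of the dataset row): it assumes a CUDA device and the MultiHeadAttention definition from this row, with tensors in the (sequence_length, batch_size, hidden_dim) layout its docstring describes.

import torch


def check_multi_head_attention():
    # Hypothetical sketch; attention keeps the input shape and each
    # per-head distribution sums to one over the key dimension (dim=1).
    torch.manual_seed(0)
    v, k, q = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    mha = MultiHeadAttention(hidden_dim=4, n_head=4).cuda()
    attn, dist = mha(v, k, q)
    assert attn.shape == (4, 4, 4)
    assert torch.allclose(dist.sum(dim=1), torch.ones_like(dist.sum(dim=1)))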
UpsamplerModel
import torch
import numpy as np
import torch.nn as nn


class UpsamplerModel(nn.Module):

    def __init__(self, output_shape, factor):
        assert output_shape[0] % factor == 0
        assert output_shape[1] % factor == 0
        super(UpsamplerModel, self).__init__()
        self.output_shape = output_shape
        seed = np.ones((1, 1, output_shape[0] // factor, output_shape[1] //
            factor)) * 0.5
        self.sigmoid = nn.Sigmoid()
        self.seed = nn.Parameter(data=torch.FloatTensor(seed),
            requires_grad=True)

    def forward(self):
        return nn.functional.interpolate(self.sigmoid(self.seed), size=
            self.output_shape, mode='bilinear')


def get_inputs():
    return []


def get_init_inputs():
    return [[], {'output_shape': [4, 4], 'factor': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.sigmoid(tmp1) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 0) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp11 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 1, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tl.where(tmp7, tmp6, tmp5) tmp12 = tmp11 + tmp1 tmp13 = tmp11 < 0 tl.where(tmp13, tmp12, tmp11) tmp15 = tmp10 - tmp10 tmp17 = tmp15 * tmp16 tmp18 = tmp10 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tl.where(tmp21, tmp20, 
tmp19) tmp23 = tmp18 - tmp18 tmp25 = tmp23 * tmp24 tmp26 = tmp18 + tmp25 tl.store(in_out_ptr0 + x2, tmp26, xmask) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(1)](primals_1, buf0, 1, XBLOCK=1, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(4)](buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(4)](buf2, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf3 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(4)](buf4, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf8 = buf6 del buf6 triton_poi_fused__unsafe_index_add_mul_sub_4[grid(16)](buf8, buf1, buf3, buf0, buf4, buf5, buf2, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf8, buf0, buf1, buf2, buf3, buf4, buf5, buf7 class UpsamplerModelNew(nn.Module): def __init__(self, output_shape, factor): assert output_shape[0] % factor == 0 assert output_shape[1] % factor == 0 super(UpsamplerModelNew, self).__init__() self.output_shape = output_shape seed = np.ones((1, 1, output_shape[0] // factor, output_shape[1] // factor)) * 0.5 self.sigmoid = nn.Sigmoid() self.seed = nn.Parameter(data=torch.FloatTensor(seed), requires_grad=True) def forward(self): primals_1 = self.seed output = call([primals_1]) return output[0]
GuYuanjie/DeepFusionPrior
UpsamplerModel
false
5,235
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
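A hedged usage sketch for the UpsamplerModel row above (an illustration, not part of the dataset row): it assumes a CUDA device and the UpsamplerModel definition from this row; note that forward takes no inputs and simply upsamples the learned seed.

import torch


def check_upsampler():
    # Hypothetical sketch; the 1x1 seed starts at 0.5, so bilinear
    # upsampling of sigmoid(seed) is a constant map at initialization.
    model = UpsamplerModel(output_shape=[4, 4], factor=4).cuda()
    out = model()
    assert out.shape == (1, 1, 4, 4)
    expected = torch.full_like(out, torch.sigmoid(torch.tensor(0.5)).item())
    assert torch.allclose(out, expected)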
Linear
import math
import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn import Parameter
import torch.utils.data


def uniform(size, tensor):
    bound = 1.0 / math.sqrt(size)
    if tensor is not None:
        tensor.data.uniform_(-bound, bound)


def kaiming_uniform(tensor, fan, a):
    if tensor is not None:
        bound = math.sqrt(6 / ((1 + a ** 2) * fan))
        tensor.data.uniform_(-bound, bound)


class Linear(torch.nn.Module):

    def __init__(self, in_channels, out_channels, groups=1, bias=True):
        super(Linear, self).__init__()
        assert in_channels % groups == 0 and out_channels % groups == 0
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.groups = groups
        self.weight = Parameter(Tensor(groups, in_channels // groups,
            out_channels // groups))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
        uniform(self.weight.size(1), self.bias)

    def forward(self, src):
        if self.groups > 1:
            size = list(src.size())[:-1]
            src = src.view(-1, self.groups, self.in_channels // self.groups)
            src = src.transpose(0, 1).contiguous()
            out = torch.matmul(src, self.weight)
            out = out.transpose(1, 0).contiguous()
            out = out.view(*(size + [self.out_channels]))
        else:
            out = torch.matmul(src, self.weight.squeeze(0))
        if self.bias is not None:
            out += self.bias
        return out

    def __repr__(self):
        return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
            __name__, self.in_channels, self.out_channels, self.groups,
            self.bias is not None)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class LinearNew(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(LinearNew, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
GrumpyZhou/pytorch_geometric
Linear
false
5,236
[ "MIT" ]
1
88c54e72d3e26ad48e9ccd99e5696c7f19269d94
https://github.com/GrumpyZhou/pytorch_geometric/tree/88c54e72d3e26ad48e9ccd99e5696c7f19269d94
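A hedged usage sketch for the grouped Linear row above (an illustration, not part of the dataset row): it assumes a CUDA device and the Linear class defined in this row, which shadows torch.nn.Linear; with groups greater than one, each group maps its own slice of input features to its own slice of output features.

import torch


def check_grouped_linear():
    # Hypothetical sketch of the groups > 1 code path.
    torch.manual_seed(0)
    lin = Linear(in_channels=4, out_channels=4, groups=2).cuda()
    x = torch.rand(8, 4, device='cuda')
    out = lin(x)
    assert out.shape == (8, 4)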
FixedBlurLayer
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class FixedBlurLayer(nn.Module):

    def __init__(self, kernel):
        super(FixedBlurLayer, self).__init__()
        self.kernel = kernel
        to_pad_x = int((self.kernel.shape[0] - 1) / 2)
        to_pad_y = int((self.kernel.shape[1] - 1) / 2)
        self.pad = nn.ReflectionPad2d((to_pad_x, to_pad_x, to_pad_y, to_pad_y))
        self.mask_np = np.zeros(shape=(1, 3, self.kernel.shape[0], self.
            kernel.shape[1]))
        self.mask_np[0, 0, :, :] = self.kernel
        self.mask_np[0, 1, :, :] = self.kernel
        self.mask_np[0, 2, :, :] = self.kernel
        self.mask = nn.Parameter(data=torch.FloatTensor(self.mask_np),
            requires_grad=False)

    def forward(self, x):
        return F.conv2d(self.pad(x), self.mask)


def get_inputs():
    return [torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {'kernel': torch.rand([4, 4])}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 6 x3 = xindex // 6 y4 = yindex x5 = xindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x3)) + 16 * y4), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x5 + 108 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 3 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x1), tmp0, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(arg1_1, (1, 3, 4, 4), (48, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 6, 6), (108, 1, 18, 3), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(12, 36)](arg0_1, buf0, 12, 36, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((1, 3, 4, 4), (48, 1, 12, 3), torch.float32) triton_poi_fused_convolution_reflection_pad2d_1[grid(3, 16)](arg1_1, buf1, 3, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del arg1_1 buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 3, 3), (9, 1, 3, 1)) del buf0 del buf1 return buf2, class FixedBlurLayerNew(nn.Module): def __init__(self, kernel): super(FixedBlurLayerNew, self).__init__() self.kernel = kernel to_pad_x = int((self.kernel.shape[0] - 1) / 2) to_pad_y = int((self.kernel.shape[1] - 1) / 2) self.pad = nn.ReflectionPad2d((to_pad_x, to_pad_x, to_pad_y, to_pad_y)) self.mask_np = np.zeros(shape=(1, 3, self.kernel.shape[0], self. kernel.shape[1])) self.mask_np[0, 0, :, :] = self.kernel self.mask_np[0, 1, :, :] = self.kernel self.mask_np[0, 2, :, :] = self.kernel self.mask = nn.Parameter(data=torch.FloatTensor(self.mask_np), requires_grad=False) def forward(self, input_0): arg1_1 = self.mask arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
GuYuanjie/DeepFusionPrior
FixedBlurLayer
false
5,237
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
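A hedged usage sketch for the FixedBlurLayer row above (an illustration, not part of the dataset row): it assumes a CUDA device and the FixedBlurLayer definition from this row; the 1x3xKxK mask sums the three input channels into a single blurred channel.

import torch


def check_fixed_blur():
    # Hypothetical sketch with a simple box kernel; the kernel values are
    # an assumption, any 2-D tensor of the expected shape would do.
    torch.manual_seed(0)
    kernel = torch.ones(4, 4) / 16.0
    blur = FixedBlurLayer(kernel).cuda()
    x = torch.rand(4, 3, 4, 4, device='cuda')
    assert blur(x).shape == (4, 1, 3, 3)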
CovarianceLayer
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class CovarianceLayer(nn.Module):

    def __init__(self, patch_size=5, channels=1):
        self.patch_size = patch_size
        super(CovarianceLayer, self).__init__()
        mean_mask = np.ones((channels, channels, patch_size, patch_size)) / (
            patch_size * patch_size)
        self.mean_mask = nn.Parameter(data=torch.FloatTensor(mean_mask),
            requires_grad=False)
        mask = np.zeros((channels, channels, patch_size, patch_size))
        mask[:, :, patch_size // 2, patch_size // 2] = 1.0
        self.ones_mask = nn.Parameter(data=torch.FloatTensor(mask),
            requires_grad=False)

    def forward(self, x, y):
        return F.conv2d((F.conv2d(x, self.ones_mask) - F.conv2d(x, self.
            mean_mask)) * (F.conv2d(y, self.ones_mask) - F.conv2d(y, self.
            mean_mask)), self.mean_mask)


def get_inputs():
    return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 14400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tmp2 * tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (1, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(arg2_1, (1, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(arg3_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 60, 60), (3600, 3600, 60, 1)) buf1 = extern_kernels.convolution(arg1_1, arg2_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg1_1 buf2 = extern_kernels.convolution(arg3_1, arg0_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg0_1 buf3 = extern_kernels.convolution(arg3_1, arg2_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg3_1 buf4 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_mul_sub_0[grid(14400)](buf4, buf1, buf2, buf3, 14400, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del buf3 buf5 = extern_kernels.convolution(buf4, arg2_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 1, 56, 56), (3136, 3136, 56, 1)) del arg2_1 del buf4 return buf5, class CovarianceLayerNew(nn.Module): def __init__(self, patch_size=5, channels=1): self.patch_size = patch_size super(CovarianceLayerNew, self).__init__() mean_mask = np.ones((channels, channels, patch_size, patch_size)) / ( patch_size * patch_size) self.mean_mask = nn.Parameter(data=torch.FloatTensor(mean_mask), requires_grad=False) mask = np.zeros((channels, channels, patch_size, patch_size)) mask[:, :, patch_size // 2, patch_size // 2] = 1.0 self.ones_mask = nn.Parameter(data=torch.FloatTensor(mask), requires_grad=False) def forward(self, input_0, input_1): arg0_1 = self.mean_mask arg2_1 = self.ones_mask arg1_1 = input_0 arg3_1 = input_1 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
GuYuanjie/DeepFusionPrior
CovarianceLayer
false
5,238
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
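A hedged sanity check for the CovarianceLayer row above (an illustration, not part of the dataset row): it assumes a CUDA device and the CovarianceLayer definition from this row; feeding the same image twice reduces the local covariance to a local variance estimate, which is non-negative.

import torch


def check_covariance_layer():
    # Hypothetical sketch; each 5x5 valid convolution shrinks the spatial
    # size by 4, so the two stacked convolutions take 64 down to 56.
    torch.manual_seed(0)
    x = torch.rand(4, 1, 64, 64, device='cuda')
    cov = CovarianceLayer(patch_size=5, channels=1).cuda()
    out = cov(x, x)
    assert out.shape == (4, 1, 56, 56)
    assert (out >= -1e-6).all()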
Attention
import math
import torch
import torch.nn.functional as F
import torch.utils.data


def restricted_softmax(src, dim=-1, margin=0):
    src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
    out = (src - src_max).exp()
    out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
    return out


class Attention(torch.nn.Module):

    def __init__(self, dropout=0):
        super(Attention, self).__init__()
        self.dropout = dropout

    def forward(self, query, key, value):
        assert query.dim() == key.dim() == value.dim() >= 2
        assert query.size(-1) == key.size(-1)
        assert key.size(-2) == value.size(-2)
        score = torch.matmul(query, key.transpose(-2, -1))
        score = score / math.sqrt(key.size(-1))
        score = restricted_softmax(score, dim=-1)
        score = F.dropout(score, p=self.dropout, training=self.training)
        return torch.matmul(score, value)

    def __repr__(self):
        return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1, buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = buf0 del buf0 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class AttentionNew(torch.nn.Module): def __init__(self, dropout=0): super(AttentionNew, self).__init__() self.dropout = dropout def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
GrumpyZhou/pytorch_geometric
Attention
false
5,239
[ "MIT" ]
1
88c54e72d3e26ad48e9ccd99e5696c7f19269d94
https://github.com/GrumpyZhou/pytorch_geometric/tree/88c54e72d3e26ad48e9ccd99e5696c7f19269d94
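A hedged sanity check for the Attention row above (an illustration, not part of the dataset row): it assumes a CUDA device and the Attention / restricted_softmax definitions from this row; because restricted_softmax adds exp(margin - max) to the denominator, each row of scores sums to at most one.

import torch


def check_attention():
    # Hypothetical sketch; the output keeps the query shape.
    torch.manual_seed(0)
    q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    out = Attention(dropout=0)(q, k, v)
    assert out.shape == (4, 4, 4, 4)
    s = restricted_softmax(torch.randn(2, 3, device='cuda'))
    assert (s.sum(dim=-1) <= 1 + 1e-6).all()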
MixerBlock
import torch
import torch.nn.functional as F
from torch import nn


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout1(F.gelu(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        return x


class ChannelMixer(nn.Module):

    def __init__(self, d_model, expansion_factor, dropout):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.mlp = FeedForward(d_model, expansion_factor, dropout)

    def forward(self, x):
        residual = x
        x = self.norm(x)
        x = self.mlp(x)
        out = x + residual
        return out


class TokenMixer(nn.Module):

    def __init__(self, d_model, seq_len, expansion_factor, dropout):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.mlp = FeedForward(seq_len, expansion_factor, dropout)

    def forward(self, x):
        residual = x
        x = self.norm(x)
        x = x.transpose(1, 2)
        x = self.mlp(x)
        x = x.transpose(1, 2)
        out = x + residual
        return out


class MixerBlock(nn.Module):

    def __init__(self, d_model, seq_len, expansion_factor, dropout):
        super().__init__()
        self.token_mixer = TokenMixer(d_model, seq_len, expansion_factor,
            dropout)
        self.channel_mixer = ChannelMixer(d_model, expansion_factor, dropout)

    def forward(self, x):
        x = self.token_mixer(x)
        x = self.channel_mixer(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'seq_len': 4, 'expansion_factor': 4,
        'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x5 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask) @triton.jit def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) tl.store(out_ptr1 + x3, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + x4, xmask) tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_gelu_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp4 = tl.load(in_ptr2 + x4, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 4), (4, 1)) 
assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_add_gelu_2[grid(1024)](buf3, primals_5, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = buf1 del buf1 buf7 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_3[grid(64)](buf5, primals_1, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(256)](buf5, primals_1, buf6, buf7, primals_8, primals_9, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del buf7 del primals_9 buf9 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_11 buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. 
float32) triton_poi_fused_gelu_5[grid(1024)](buf9, buf10, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (64, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf11 triton_poi_fused_add_6[grid(256)](buf12, primals_13, buf5, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 return buf12, primals_1, primals_5, primals_8, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), buf5, reinterpret_tensor(buf8, (64, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (64, 16), (16, 1), 0 ), primals_12, primals_10, primals_6, primals_4 class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = expansion_factor * num_features self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class ChannelMixer(nn.Module): def __init__(self, d_model, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(d_model, expansion_factor, dropout) def forward(self, x): residual = x x = self.norm(x) x = self.mlp(x) out = x + residual return out class TokenMixer(nn.Module): def __init__(self, d_model, seq_len, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(d_model) self.mlp = FeedForward(seq_len, expansion_factor, dropout) def forward(self, x): residual = x x = self.norm(x) x = x.transpose(1, 2) x = self.mlp(x) x = x.transpose(1, 2) out = x + residual return out class MixerBlockNew(nn.Module): def __init__(self, d_model, seq_len, expansion_factor, dropout): super().__init__() self.token_mixer = TokenMixer(d_model, seq_len, expansion_factor, dropout) self.channel_mixer = ChannelMixer(d_model, expansion_factor, dropout) def forward(self, input_0): primals_2 = self.token_mixer.norm.weight primals_3 = self.token_mixer.norm.bias primals_4 = self.token_mixer.mlp.fc1.weight primals_5 = self.token_mixer.mlp.fc1.bias primals_6 = self.token_mixer.mlp.fc2.weight primals_7 = self.token_mixer.mlp.fc2.bias primals_8 = self.channel_mixer.norm.weight primals_9 = self.channel_mixer.norm.bias primals_10 = self.channel_mixer.mlp.fc1.weight primals_11 = self.channel_mixer.mlp.fc1.bias primals_12 = self.channel_mixer.mlp.fc2.weight primals_13 = self.channel_mixer.mlp.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
GimmeSpoon/mlp-singer
MixerBlock
false
5,240
[ "MIT" ]
1
36d10a23c46fa7400994ccd063de79ff089efd5e
https://github.com/GimmeSpoon/mlp-singer/tree/36d10a23c46fa7400994ccd063de79ff089efd5e
My_loss2
import torch
import torch.nn as nn


class My_loss2(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, y, batch_size, mask):
        return torch.sum(torch.pow(x - y, 2) * mask) / batch_size / 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_div_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp4 = tl.load(in_ptr2 + r0, None)
    tmp9 = tl.load(in_ptr3 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp5 = tmp3 * tmp4
    tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
    tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
    tmp10 = tmp8 / tmp9
    tmp11 = 0.5
    tmp12 = tmp10 * tmp11
    tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp12, None)


def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_div_mul_pow_sub_sum_0[grid(1)](arg0_1, arg1_1,
            arg2_1, arg3_1, buf1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
    return buf1,


class My_loss2New(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1, input_2, input_3):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        arg3_1 = input_3
        output = call([arg0_1, arg1_1, arg2_1, arg3_1])
        return output[0]
H-Liu1997/Pytorch_Pose_Estimation_Framework
My_loss2
false
5,241
[ "MIT" ]
1
06616b3459ff639f8486e6ea4f93922597788b2a
https://github.com/H-Liu1997/Pytorch_Pose_Estimation_Framework/tree/06616b3459ff639f8486e6ea4f93922597788b2a
NoiseNet
import torch
import torch.nn as nn
import torch.nn.functional as F


class NoiseNet(nn.Module):

    def __init__(self, channels=3, kernel_size=5):
        super(NoiseNet, self).__init__()
        self.kernel_size = kernel_size
        self.channels = channels
        to_pad = int((self.kernel_size - 1) / 2)
        self.padder = nn.ReflectionPad2d(to_pad).type(torch.FloatTensor)
        to_pad = 0
        self.convolver = nn.Conv2d(channels, channels, self.kernel_size, 1,
            padding=to_pad, bias=True).type(torch.FloatTensor)

    def forward(self, x):
        assert x.shape[1] == self.channels, (x.shape, self.channels)
        first = F.relu(self.convolver(self.padder(x)))
        second = F.relu(self.convolver(self.padder(first)))
        third = F.relu(self.convolver(self.padder(second)))
        assert x.shape == third.shape, (x.shape, third.shape)
        return third


def get_inputs():
    return [torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 8 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 8 x4 = xindex // 64 x2 = xindex // 64 % 3 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + x3, xmask) tmp11 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tmp12 = tmp11 + tmp1 tmp13 = triton_helpers.maximum(tmp3, tmp12) tmp14 = tmp13 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) tl.store(out_ptr2 + x3, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (3, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 8, 8), (192, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(768)](primals_1, buf0, 768, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 3, 4, 4), (48, 16, 4, 1)) buf2 = empty_strided_cuda((4, 3, 8, 8), (192, 64, 8, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_1[grid(768)](buf1, primals_3, buf2, 768, XBLOCK=256, num_warps=4, 
num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 3, 4, 4), (48, 16, 4, 1)) buf4 = empty_strided_cuda((4, 3, 8, 8), (192, 64, 8, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_1[grid(768)](buf3, primals_3, buf4, 768, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 3, 4, 4), (48, 16, 4, 1)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) buf8 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) buf9 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(192)](buf6, primals_3, buf3, buf1, buf7, buf8, buf9, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 del primals_3 return buf6, primals_2, buf0, buf2, buf4, buf7, buf8, buf9 class NoiseNetNew(nn.Module): def __init__(self, channels=3, kernel_size=5): super(NoiseNetNew, self).__init__() self.kernel_size = kernel_size self.channels = channels to_pad = int((self.kernel_size - 1) / 2) self.padder = nn.ReflectionPad2d(to_pad).type(torch.FloatTensor) to_pad = 0 self.convolver = nn.Conv2d(channels, channels, self.kernel_size, 1, padding=to_pad, bias=True).type(torch.FloatTensor) def forward(self, input_0): primals_2 = self.convolver.weight primals_3 = self.convolver.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
GuYuanjie/DeepFusionPrior
NoiseNet
false
5,242
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
PixelNorm
import torch
import torch.nn as nn


def pixel_norm(x, eps=1e-06):
    """Pixel Normalization.

    This normalization is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid dividing zero.
            Defaults to 1e-6.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    if torch.__version__ >= '1.7.0':
        norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
    else:
        norm = torch.norm(x, p=2, dim=1, keepdim=True)
    norm = norm / torch.sqrt(torch.tensor(x.shape[1]))
    return x / (norm + eps)


class PixelNorm(nn.Module):
    """Pixel Normalization.

    This module is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        eps (float, optional): Epsilon value. Defaults to 1e-6.
    """
    _abbr_ = 'pn'

    def __init__(self, in_channels=None, eps=1e-06):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): Tensor to be normalized.

        Returns:
            torch.Tensor: Normalized tensor.
        """
        return pixel_norm(x, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 0.5 tmp14 = tmp12 * tmp13 tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = tmp0 / tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, def pixel_norm(x, eps=1e-06): """Pixel Normalization. This normalization is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Args: x (torch.Tensor): Tensor to be normalized. eps (float, optional): Epsilon to avoid dividing zero. Defaults to 1e-6. Returns: torch.Tensor: Normalized tensor. """ if torch.__version__ >= '1.7.0': norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True) else: norm = torch.norm(x, p=2, dim=1, keepdim=True) norm = norm / torch.sqrt(torch.tensor(x.shape[1])) return x / (norm + eps) class PixelNormNew(nn.Module): """Pixel Normalization. This module is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Args: eps (float, optional): Epsilon value. Defaults to 1e-6. """ _abbr_ = 'pn' def __init__(self, in_channels=None, eps=1e-06): super().__init__() self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
HXWAndCL/mmgeneration
PixelNorm
false
5,243
[ "Apache-2.0" ]
1
9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
https://github.com/HXWAndCL/mmgeneration/tree/9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
MultiHeadAttention
from torch.nn import Module
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch.nn


class ScaledDotProductAttention(nn.Module):
    """
    Scaled dot-product attention
    """

    def __init__(self, d_model, d_k, d_v, h):
        """
        :param d_model: Output dimensionality of the model
        :param d_k: Dimensionality of queries and keys
        :param d_v: Dimensionality of values
        :param h: Number of heads
        """
        super(ScaledDotProductAttention, self).__init__()
        self.fc_q = nn.Linear(d_model, h * d_k)
        self.fc_k = nn.Linear(d_model, h * d_k)
        self.fc_v = nn.Linear(d_model, h * d_v)
        self.fc_o = nn.Linear(h * d_v, d_model)
        self.d_model = d_model
        self.d_k = d_k
        self.d_v = d_v
        self.h = h
        self.init_weights()

    def init_weights(self):
        nn.init.xavier_uniform_(self.fc_q.weight)
        nn.init.xavier_uniform_(self.fc_k.weight)
        nn.init.xavier_uniform_(self.fc_v.weight)
        nn.init.xavier_uniform_(self.fc_o.weight)
        nn.init.constant_(self.fc_q.bias, 0)
        nn.init.constant_(self.fc_k.bias, 0)
        nn.init.constant_(self.fc_v.bias, 0)
        nn.init.constant_(self.fc_o.bias, 0)

    def forward(self, queries, keys, values, attention_mask=None,
        attention_weights=None):
        """
        Computes
        :param queries: Queries (b_s, nq, d_model)
        :param keys: Keys (b_s, nk, d_model)
        :param values: Values (b_s, nk, d_model)
        :param attention_mask: Mask over attention values (b_s, h, nq, nk).
            True indicates masking.
        :param attention_weights: Multiplicative weights for attention values
            (b_s, h, nq, nk).
        :return:
        """
        b_s, nq = queries.shape[:2]
        nk = keys.shape[1]
        q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
            1, 3)
        k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)
        v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2,
            1, 3)
        att = torch.matmul(q, k) / np.sqrt(self.d_k)
        if attention_weights is not None:
            att = att * attention_weights
        if attention_mask is not None:
            att = att.masked_fill(attention_mask, -np.inf)
        att = torch.softmax(att, -1)
        out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s,
            nq, self.h * self.d_v)
        out = self.fc_o(out)
        return out


class MultiHeadAttention(Module):
    """
    Multi-head attention layer with Dropout and Layer Normalization.
    """

    def __init__(self, d_model, d_k, d_v, h, dropout=0.1,
        identity_map_reordering=False, can_be_stateful=False,
        attention_module=None, attention_module_kwargs=None):
        super(MultiHeadAttention, self).__init__()
        self.identity_map_reordering = identity_map_reordering
        if attention_module is not None:
            if attention_module_kwargs is not None:
                self.attention = attention_module(d_model=d_model, d_k=d_k,
                    d_v=d_v, h=h, **attention_module_kwargs)
            else:
                self.attention = attention_module(d_model=d_model, d_k=d_k,
                    d_v=d_v, h=h)
        else:
            self.attention = ScaledDotProductAttention(d_model=d_model, d_k
                =d_k, d_v=d_v, h=h)
        self.dropout = nn.Dropout(p=dropout)
        self.layer_norm = nn.LayerNorm(d_model)
        self.can_be_stateful = can_be_stateful
        if self.can_be_stateful:
            self.register_state('running_keys', torch.zeros((0, d_model)))
            self.register_state('running_values', torch.zeros((0, d_model)))

    def forward(self, queries, keys, values, attention_mask=None,
        attention_weights=None):
        if self.can_be_stateful and self._is_stateful:
            self.running_keys = torch.cat([self.running_keys, keys], 1)
            keys = self.running_keys
            self.running_values = torch.cat([self.running_values, values], 1)
            values = self.running_values
        if self.identity_map_reordering:
            q_norm = self.layer_norm(queries)
            k_norm = self.layer_norm(keys)
            v_norm = self.layer_norm(values)
            out = self.attention(q_norm, k_norm, v_norm, attention_mask,
                attention_weights)
            out = queries + self.dropout(torch.relu(out))
        else:
            out = self.attention(queries, keys, values, attention_mask,
                attention_weights)
            out = self.dropout(out)
            out = self.layer_norm(queries + out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
        ]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module import numpy as np import torch.nn as nn import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 2.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6.to(tl.float64) tmp21 = tmp20 * tmp1 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def 
triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_4, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(64, 4)](buf1, primals_6, buf4, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_sqrt_2[grid(256)](buf5, buf6, 256, XBLOCK =256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_clone_0[grid(256)](buf2, primals_8, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, 
(16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf11) del primals_11 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf11, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf11, buf12, buf13, primals_12, primals_13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_13 return buf14, primals_1, primals_12, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), buf11, primals_10, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class MultiHeadAttentionNew(Module): """ Multi-head attention layer with Dropout and Layer Normalization. 
""" def __init__(self, d_model, d_k, d_v, h, dropout=0.1, identity_map_reordering=False, can_be_stateful=False, attention_module=None, attention_module_kwargs=None): super(MultiHeadAttentionNew, self).__init__() self.identity_map_reordering = identity_map_reordering if attention_module is not None: if attention_module_kwargs is not None: self.attention = attention_module(d_model=d_model, d_k=d_k, d_v=d_v, h=h, **attention_module_kwargs) else: self.attention = attention_module(d_model=d_model, d_k=d_k, d_v=d_v, h=h) else: self.attention = ScaledDotProductAttention(d_model=d_model, d_k =d_k, d_v=d_v, h=h) self.dropout = nn.Dropout(p=dropout) self.layer_norm = nn.LayerNorm(d_model) self.can_be_stateful = can_be_stateful if self.can_be_stateful: self.register_state('running_keys', torch.zeros((0, d_model))) self.register_state('running_values', torch.zeros((0, d_model))) def forward(self, input_0, input_1, input_2): primals_3 = self.attention.fc_q.weight primals_4 = self.attention.fc_q.bias primals_5 = self.attention.fc_k.weight primals_6 = self.attention.fc_k.bias primals_7 = self.attention.fc_v.weight primals_8 = self.attention.fc_v.bias primals_10 = self.attention.fc_o.weight primals_11 = self.attention.fc_o.bias primals_12 = self.layer_norm.weight primals_13 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
GavinGuan95/Generative-VQA
MultiHeadAttention
false
5,244
[ "MIT" ]
1
0912e3a2426809ef4d4eb40bae667b31c2269161
https://github.com/GavinGuan95/Generative-VQA/tree/0912e3a2426809ef4d4eb40bae667b31c2269161
My_loss_focus
import torch
import torch.nn as nn


class My_loss_focus(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, y, batch_size):
        return torch.sum(torch.pow(x - y, 4)) / batch_size


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp8 = tl.load(in_ptr2 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
    tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tmp9 = tmp7 / tmp8
    tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp9, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_div_pow_sub_sum_0[grid(1)](arg0_1, arg1_1, arg2_1,
            buf1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf1,


class My_loss_focusNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
H-Liu1997/Pytorch_Pose_Estimation_Framework
My_loss_focus
false
5,245
[ "MIT" ]
1
06616b3459ff639f8486e6ea4f93922597788b2a
https://github.com/H-Liu1997/Pytorch_Pose_Estimation_Framework/tree/06616b3459ff639f8486e6ea4f93922597788b2a
StdLoss
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional


class GrayscaleLayer(nn.Module):

    def __init__(self):
        super(GrayscaleLayer, self).__init__()

    def forward(self, x):
        return torch.mean(x, 1, keepdim=True)


class StdLoss(nn.Module):

    def __init__(self):
        """
        Loss on the variance of the image.
        Works in the grayscale.
        If the image is smooth, gets zero
        """
        super(StdLoss, self).__init__()
        blur = 1 / 25 * np.ones((5, 5))
        blur = blur.reshape(1, 1, blur.shape[0], blur.shape[1])
        self.mse = nn.MSELoss()
        self.blur = nn.Parameter(data=torch.FloatTensor(blur),
            requires_grad=False)
        image = np.zeros((5, 5))
        image[2, 2] = 1
        image = image.reshape(1, 1, image.shape[0], image.shape[1])
        self.image = nn.Parameter(data=torch.FloatTensor(image),
            requires_grad=False)
        self.gray_scale = GrayscaleLayer()

    def forward(self, x):
        x = self.gray_scale(x)
        return self.mse(functional.conv2d(x, self.image), functional.conv2d
            (x, self.blur))


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4096 x1 = xindex // 4096 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16384 * x1), None) tmp1 = tl.load(in_ptr0 + (4096 + x0 + 16384 * x1), None) tmp3 = tl.load(in_ptr0 + (8192 + x0 + 16384 * x1), None) tmp5 = tl.load(in_ptr0 + (12288 + x0 + 16384 * x1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_red_fused_mse_loss_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 2 rnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 7200 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 7200 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = _tmp5 + tmp4 _tmp5 = tl.where(rmask & xmask, tmp6, _tmp5) tmp5 = tl.sum(_tmp5, 1)[:, None] tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_per_fused_mse_loss_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 14400.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(arg1_1, (1, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(arg2_1, (1, 1, 5, 5), (25, 25, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16384)](arg0_1, buf0, 16384, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg1_1 buf2 = extern_kernels.convolution(buf0, arg2_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 60, 60), (3600, 3600, 60, 1)) del arg2_1 del buf0 buf3 = empty_strided_cuda((2,), (1,), torch.float32) triton_red_fused_mse_loss_1[grid(2)](buf1, buf2, buf3, 2, 
7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del buf1 del buf2 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused_mse_loss_2[grid(1)](buf5, buf3, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf3 return buf5, class GrayscaleLayer(nn.Module): def __init__(self): super(GrayscaleLayer, self).__init__() def forward(self, x): return torch.mean(x, 1, keepdim=True) class StdLossNew(nn.Module): def __init__(self): """ Loss on the variance of the image. Works in the grayscale. If the image is smooth, gets zero """ super(StdLossNew, self).__init__() blur = 1 / 25 * np.ones((5, 5)) blur = blur.reshape(1, 1, blur.shape[0], blur.shape[1]) self.mse = nn.MSELoss() self.blur = nn.Parameter(data=torch.FloatTensor(blur), requires_grad=False) image = np.zeros((5, 5)) image[2, 2] = 1 image = image.reshape(1, 1, image.shape[0], image.shape[1]) self.image = nn.Parameter(data=torch.FloatTensor(image), requires_grad=False) self.gray_scale = GrayscaleLayer() def forward(self, input_0): arg1_1 = self.blur arg2_1 = self.image arg0_1 = input_0 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
GuYuanjie/DeepFusionPrior
StdLoss
false
5,246
[ "MIT" ]
1
a7126e073ed8c49b6a9a662492b64aaeee56cc01
https://github.com/GuYuanjie/DeepFusionPrior/tree/a7126e073ed8c49b6a9a662492b64aaeee56cc01
LinearModel
import torch
import torch.nn as nn
import torch.autograd
import torch.backends.cudnn
# Required by the save()/load() helpers below.
from pathlib import Path
import click_spinner
import typer


class LinearModel(nn.Module):
    """
    NetModel class for the neural network. inherits from NetModel.
    """

    def __init__(self, input_size, output_size, hidden_size):
        """
        Initialize the model.
        :param input_size:
        :param output_size:
        """
        super(LinearModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_size, 50)
        self.fc4 = nn.PReLU()
        self.fc5 = nn.Linear(50, output_size)
        self.out = nn.Sigmoid()

    def forward(self, x):
        """
        Forward pass of the model.
        :param x:
        :return: logits
        """
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        x = self.fc4(x)
        x = self.fc5(x)
        return self.out(x)

    def save(self, filename: 'str'):
        """
        Save the model to a file.
        :param filename:
        :return: None
        """
        path = Path(filename)
        with click_spinner.spinner('Saving model to {}'.format(path)):
            with path.open('wb') as f:
                torch.save(self, f)
        typer.secho(f'{self.__class__.__name__} saved', fg='green')
        return None

    @classmethod
    def load(cls, path):
        """
        Load a model from a file.
        :param path: path to the model file (str)
        :return: NetModel instance
        """
        path = Path(path)
        with click_spinner.spinner('Loading model from {}'.format(path)):
            with path.open('rb') as f:
                model = torch.load(f)
        name = model.__class__.__name__
        typer.secho(f'{name} loaded', fg='green')
        return model


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.autograd import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (50, 4), (4, 1)) assert_size_stride(primals_5, (50,), (1,)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 50), (50, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 50), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch. 
float32) triton_poi_fused__prelu_kernel_1[grid(3200)](buf2, primals_6, buf3, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 50), (50, 1), 0), reinterpret_tensor(primals_7, (50, 4), (1, 50), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(256)](buf5, primals_8, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_8 return buf5, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 50), (50, 1), 0 ), buf5, primals_7, primals_4, buf6 class LinearModelNew(nn.Module): """ NetModel class for the neural network. inherits from NetModel. """ def __init__(self, input_size, output_size, hidden_size): """ Initialize the model. :param input_size: :param output_size: """ super(LinearModelNew, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.ReLU() self.fc3 = nn.Linear(hidden_size, 50) self.fc4 = nn.PReLU() self.fc5 = nn.Linear(50, output_size) self.out = nn.Sigmoid() def save(self, filename: 'str'): """ Save the model to a file. :param filename: :return: None """ path = Path(filename) with click_spinner.spinner('Saving model to {}'.format(path)): with path.open('wb') as f: torch.save(self, f) typer.secho(f'{self.__class__.__name__} saved', fg='green') return None @classmethod def load(cls, path): """ Load a model from a file. :param path: path to the model file (str) :return: NetModel instance """ path = Path(path) with click_spinner.spinner('Loading model from {}'.format(path)): with path.open('rb') as f: model = torch.load(f) name = model.__class__.__name__ typer.secho(f'{name} loaded', fg='green') return model def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc3.weight primals_5 = self.fc3.bias primals_6 = self.fc4.weight primals_7 = self.fc5.weight primals_8 = self.fc5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Guydada/MIND-Recommender-System-Ptoject-Pytorch-TF-IDF--Deep-Learning
LinearModel
false
5,247
[ "MIT" ]
1
1f42db2f5bc29d6bafbd3261407b41ab1a6eae95
https://github.com/Guydada/MIND-Recommender-System-Ptoject-Pytorch-TF-IDF--Deep-Learning/tree/1f42db2f5bc29d6bafbd3261407b41ab1a6eae95
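A minimal usage sketch for the LinearModel row above. It assumes only the class as defined in the original code field and the shapes reported by get_inputs()/get_init_inputs(); nothing here comes from the source repository itself.

import torch

# Hypothetical smoke test: dimensions mirror get_init_inputs()/get_inputs().
model = LinearModel(input_size=4, output_size=4, hidden_size=4)
x = torch.rand([4, 4, 4, 4])
probs = model(x)
# The final nn.Sigmoid() squashes the outputs into (0, 1).
assert probs.shape == (4, 4, 4, 4)
assert ((probs > 0) & (probs < 1)).all()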
AdaptiveInstanceNorm
import torch import torch.nn as nn from torch.nn.init import _calculate_correct_fan def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' 
) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class AdaptiveInstanceNorm(nn.Module): """Adaptive Instance Normalization Module. Ref: https://github.com/rosinality/style-based-gan-pytorch/blob/master/model.py # noqa Args: in_channel (int): The number of input's channel. style_dim (int): Style latent dimension. """ def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.affine = EqualizedLRLinearModule(style_dim, in_channel * 2) self.affine.bias.data[:in_channel] = 1 self.affine.bias.data[in_channel:] = 0 def forward(self, input, style): """Forward function. Args: input (Tensor): Input tensor with shape (n, c, h, w). style (Tensor): Input style tensor with shape (n, c). Returns: Tensor: Forward results. """ style = self.affine(style).unsqueeze(2).unsqueeze(3) gamma, beta = style.chunk(2, 1) out = self.norm(input) out = gamma * out + beta return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_lift_fresh_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.4142135381698608 tmp2 = tmp0 * tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp24 = tmp22 + tmp23 tmp25 = tmp0 - tmp10 tmp26 = tmp25 * tmp21 tmp27 = tmp24 * tmp26 tmp30 = tmp28 + tmp29 tmp31 = tmp27 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_lift_fresh_mul_sqrt_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del primals_2 return buf6, buf0, primals_3, primals_4, buf2, buf5 def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' 
) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class AdaptiveInstanceNormNew(nn.Module): """Adaptive Instance Normalization Module. Ref: https://github.com/rosinality/style-based-gan-pytorch/blob/master/model.py # noqa Args: in_channel (int): The number of input's channel. style_dim (int): Style latent dimension. """ def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.affine = EqualizedLRLinearModule(style_dim, in_channel * 2) self.affine.bias.data[:in_channel] = 1 self.affine.bias.data[in_channel:] = 0 def forward(self, input_0, input_1): primals_2 = self.affine.bias primals_1 = self.affine.weight_orig primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
HXWAndCL/mmgeneration
AdaptiveInstanceNorm
false
5,248
[ "Apache-2.0" ]
1
9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
https://github.com/HXWAndCL/mmgeneration/tree/9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
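A minimal usage sketch for the AdaptiveInstanceNorm row above, assuming the class (and its EqualizedLR helpers) as defined in the original code field; the shapes follow get_inputs()/get_init_inputs().

import torch

adain = AdaptiveInstanceNorm(in_channel=4, style_dim=4)
content = torch.rand(4, 4, 4, 4)  # (n, c, h, w) feature map
style = torch.rand(4, 4)          # (n, style_dim) latent code
out = adain(content, style)
# Each channel of the instance-normalised content is rescaled by gamma and
# shifted by beta, both predicted from the style code by the affine layer.
assert out.shape == content.shape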
My_loss_offset
import torch
import torch.nn as nn


class My_loss_offset(nn.Module):
    """Masked squared-error loss for offset maps.

    Computes sum(|(x - y)^2 * mask|) / batch_size / 2, i.e. a squared error
    weighted element-wise by ``mask`` and normalised by the batch size.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, mask, y, batch_size):
        return torch.sum(torch.abs(torch.pow(x - y, 2) * mask)
            ) / batch_size / 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_div_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp10 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp3 * tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp11 = tmp9 / tmp10 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp13, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_div_mul_pow_sub_sum_0[grid(1)](arg0_1, arg1_1, arg2_1, arg3_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf1, class My_loss_offsetNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
H-Liu1997/Pytorch_Pose_Estimation_Framework
My_loss_offset
false
5,249
[ "MIT" ]
1
06616b3459ff639f8486e6ea4f93922597788b2a
https://github.com/H-Liu1997/Pytorch_Pose_Estimation_Framework/tree/06616b3459ff639f8486e6ea4f93922597788b2a
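A minimal sketch checking the My_loss_offset row above against an explicit masked squared-error expression; the tensors are random stand-ins with the shapes from get_inputs(), and batch_size is passed as a plain integer.

import torch

criterion = My_loss_offset()
x, mask, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
batch_size = 4
loss = criterion(x, mask, y, batch_size)
# abs() is a no-op here: (x - y) ** 2 and the random mask are both non-negative.
reference = ((x - y) ** 2 * mask).sum() / batch_size / 2
assert torch.allclose(loss, reference)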