| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|

Column types and value ranges (from the dataset viewer stats): entry_point: string, length 1-65; original_triton_python_code: string, length 208-619k; optimised_triton_code: string, length 1.15k-275k; repo_name: string, length 7-115; module_name: string, length 1-65; synthetic: bool, 1 class; uuid: int64, 0-18.5k; licenses: list, length 1-6; stars: int64, 0-19.8k; sha: string, length 40; repo_link: string, length 72-180.
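
A minimal sketch of loading and inspecting one record, assuming the table has been exported as JSON Lines with the column names above (the file name below is a placeholder, not part of the dataset):

```python
# Inspection sketch; assumes a JSONL export of this table.
# "records.jsonl" is a placeholder path, not a dataset field.
import json

with open("records.jsonl") as f:
    row = json.loads(next(f))

print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
# Despite its name, "original_triton_python_code" holds the eager PyTorch source,
# while "optimised_triton_code" holds the TorchInductor-generated Triton version.
print(len(row["original_triton_python_code"]), len(row["optimised_triton_code"]))
```
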
LSTMAttentionLayer
|
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torch.onnx.operators
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class LSTMAttentionLayer(nn.Module):
def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim,
bias=False, dropout=0.0):
super().__init__()
self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
self.output_proj = Linear(input_embed_dim + source_embed_dim,
output_embed_dim, bias=bias)
self.dropout = dropout
def forward(self, input, source_hids, encoder_padding_mask=None,
enc_dec_attn_constraint_mask=None):
x = self.input_proj(input)
attn_weights = torch.bmm(x.transpose(0, 1), source_hids.transpose(0,
1).transpose(1, 2))
if encoder_padding_mask is not None:
attn_weights = attn_weights.float().masked_fill_(
encoder_padding_mask.unsqueeze(1), float('-inf')).type_as(
attn_weights)
if enc_dec_attn_constraint_mask is not None:
attn_weights = attn_weights.float().masked_fill_(
enc_dec_attn_constraint_mask.bool(), float('-inf')).type_as(
attn_weights)
attn_logits = attn_weights
sz = attn_weights.size()
attn_scores = F.softmax(attn_weights.view(sz[0] * sz[1], sz[2]), dim=1)
attn_scores = attn_scores.view(sz)
attn_scores = F.dropout(attn_scores, p=self.dropout, training=self.
training)
attn = torch.bmm(attn_scores, source_hids.transpose(0, 1)).transpose(
0, 1)
x = torch.tanh(self.output_proj(torch.cat((attn, input), dim=-1)))
return x, attn_scores, attn_logits
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_embed_dim': 4, 'source_embed_dim': 4,
'output_embed_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils
import torch.optim
import torch.utils.data
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 4
x2 = xindex // 32
x3 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (4, 16, 1),
0), reinterpret_tensor(primals_3, (4, 4, 4), (4, 1, 16), 0),
out=buf1)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0),
out=buf4)
buf5 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf4, primals_2, buf5, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
extern_kernels.mm(reinterpret_tensor(buf5, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0)
del buf6
triton_poi_fused_tanh_3[grid(64)](buf7, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf7, reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), buf1, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (4, 4, 4), (4, 1, 16), 0
), buf3, reinterpret_tensor(buf5, (16, 8), (8, 1), 0), buf7, primals_4
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class LSTMAttentionLayerNew(nn.Module):
def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim,
bias=False, dropout=0.0):
super().__init__()
self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
self.output_proj = Linear(input_embed_dim + source_embed_dim,
output_embed_dim, bias=bias)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.input_proj.weight
primals_4 = self.output_proj.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1], output[2]
| repo_name: PeterouZh/SemiNAS | module_name: LSTMAttentionLayer | synthetic: false | uuid: 17,821 | licenses: ["Apache-2.0"] | stars: 5 | sha: 39731663271b994571160d43d796b2bb93386b3b | repo_link: https://github.com/PeterouZh/SemiNAS/tree/39731663271b994571160d43d796b2bb93386b3b |
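
Each record pairs the eager module with an Inductor-compiled counterpart that exposes the same constructor, so the two can be checked against each other on the bundled `get_inputs()` / `get_init_inputs()`. A rough equivalence sketch for the LSTMAttentionLayer record above, assuming the two code cells were saved as importable files (the module names `eager_mod` and `triton_mod` are placeholders):

```python
# Hedged equivalence check for the LSTMAttentionLayer record; requires a CUDA device.
# "eager_mod.py" / "triton_mod.py" are assumed save locations, not dataset fields.
import torch
from eager_mod import LSTMAttentionLayer, get_inputs, get_init_inputs
from triton_mod import LSTMAttentionLayerNew

args, kwargs = get_init_inputs()
eager = LSTMAttentionLayer(*args, **kwargs).cuda().eval()
fused = LSTMAttentionLayerNew(*args, **kwargs).cuda().eval()
fused.load_state_dict(eager.state_dict())  # share weights so outputs are comparable

inputs = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    out_ref, scores_ref, logits_ref = eager(*inputs)
    out_new, scores_new, logits_new = fused(*inputs)
torch.testing.assert_close(out_ref, out_new, rtol=1e-4, atol=1e-4)
```
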
Mean
|
from torch.nn import Module
import torch
import torch.utils.data
class Mean(Module):
def __init__(self, dim, keep_dim=False):
super(Mean, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.mean(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MeanNew(Module):
def __init__(self, dim, keep_dim=False):
super(MeanNew, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| repo_name: RL-WWW/ISST | module_name: Mean | synthetic: false | uuid: 17,822 | licenses: ["BSD-3-Clause"] | stars: 5 | sha: 42b656686fa9660794007a0bc00a7177937410e9 | repo_link: https://github.com/RL-WWW/ISST/tree/42b656686fa9660794007a0bc00a7177937410e9 |
GumbelQuantize
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
class GumbelQuantize(nn.Module):
"""
Reference:
Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
https://arxiv.org/abs/1611.01144
"""
def __init__(self, hidden_channel, n_e, e_dim, kl_weight=1.0, temp_init
=1.0, straight_through=True):
super().__init__()
self.e_dim = e_dim
self.n_e = n_e
self.straight_through = straight_through
self.temperature = temp_init
self.kl_weight = kl_weight
self.proj = nn.Conv2d(hidden_channel, n_e, kernel_size=1)
self.embedding = nn.Embedding(n_e, e_dim)
def get_codebook(self):
return self.embedding.weight
def get_codebook_entry(self, indices, shape=None):
z_q = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
def forward(self, z, temp=None):
hard = self.straight_through if self.training else True
temp = self.temperature if temp is None else temp
logits = self.proj(z)
soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
min_encoding_indices = soft_one_hot.argmax(dim=1)
z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.
embedding.weight)
code_prob = F.softmax(logits, dim=1)
loss = self.kl_weight * torch.sum(code_prob * torch.log(code_prob *
self.n_e + 1e-10), dim=1).mean()
return z_q, loss, min_encoding_indices
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_channel': 4, 'n_e': 4, 'e_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_add_log_neg_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp22 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tl_math.log(tmp1)
tmp3 = -tmp2
tmp4 = tmp0 + tmp3
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp9 = tl_math.log(tmp8)
tmp10 = -tmp9
tmp11 = tmp7 + tmp10
tmp12 = tmp11 * tmp5
tmp13 = triton_helpers.maximum(tmp6, tmp12)
tmp16 = tl_math.log(tmp15)
tmp17 = -tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp18 * tmp5
tmp20 = triton_helpers.maximum(tmp13, tmp19)
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp25 * tmp5
tmp27 = triton_helpers.maximum(tmp20, tmp26)
tmp28 = tmp6 - tmp27
tmp29 = tmp28 * tmp5
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp12 - tmp27
tmp32 = tmp31 * tmp5
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp30 + tmp33
tmp35 = tmp19 - tmp27
tmp36 = tmp35 * tmp5
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp34 + tmp37
tmp39 = tmp26 - tmp27
tmp40 = tmp39 * tmp5
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp38 + tmp41
tl.store(out_ptr0 + x2, tmp27, xmask)
tl.store(out_ptr1 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused__softmax_add_log_neg_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp7 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.log(tmp1)
tmp3 = -tmp2
tmp4 = tmp0 + tmp3
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp8 = tmp6 - tmp7
tmp9 = tmp8 * tmp5
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp0 - tmp19
tmp21 = tl_math.exp(tmp20)
tl.store(in_out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr0 + x3, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_argmax_max_scatter_sub_3(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tmp46 == tmp10
tmp48 = 1.0
tmp49 = 0.0
tmp50 = tl.where(tmp47, tmp48, tmp49)
tmp51 = tmp50 - tmp0
tmp52 = tmp51 + tmp0
tmp53 = tmp46 == tmp11
tmp54 = tl.where(tmp53, tmp48, tmp49)
tmp55 = tmp54 - tmp1
tmp56 = tmp55 + tmp1
tmp57 = tmp52 > tmp56
tmp58 = tmp52 == tmp56
tmp59 = tmp52 != tmp52
tmp60 = tmp56 != tmp56
tmp61 = tmp59 > tmp60
tmp62 = tmp57 | tmp61
tmp63 = tmp59 & tmp60
tmp64 = tmp58 | tmp63
tmp65 = tmp64 & tmp12
tmp66 = tmp62 | tmp65
tmp67 = tl.where(tmp66, tmp52, tmp56)
tmp68 = tl.where(tmp66, tmp10, tmp11)
tmp69 = tmp46 == tmp26
tmp70 = tl.where(tmp69, tmp48, tmp49)
tmp71 = tmp70 - tmp17
tmp72 = tmp71 + tmp17
tmp73 = tmp67 > tmp72
tmp74 = tmp67 == tmp72
tmp75 = tmp67 != tmp67
tmp76 = tmp72 != tmp72
tmp77 = tmp75 > tmp76
tmp78 = tmp73 | tmp77
tmp79 = tmp75 & tmp76
tmp80 = tmp74 | tmp79
tmp81 = tmp68 < tmp26
tmp82 = tmp80 & tmp81
tmp83 = tmp78 | tmp82
tmp84 = tl.where(tmp83, tmp67, tmp72)
tmp85 = tl.where(tmp83, tmp68, tmp26)
tmp86 = tmp46 == tmp41
tmp87 = tl.where(tmp86, tmp48, tmp49)
tmp88 = tmp87 - tmp32
tmp89 = tmp88 + tmp32
tmp90 = tmp84 > tmp89
tmp91 = tmp84 == tmp89
tmp92 = tmp84 != tmp84
tmp93 = tmp89 != tmp89
tmp94 = tmp92 > tmp93
tmp95 = tmp90 | tmp94
tmp96 = tmp92 & tmp93
tmp97 = tmp91 | tmp96
tmp98 = tmp85 < tmp41
tmp99 = tmp97 & tmp98
tmp100 = tmp95 | tmp99
tl.where(tmp100, tmp84, tmp89)
tmp102 = tl.where(tmp100, tmp85, tmp41)
tl.store(out_ptr0 + x2, tmp46, xmask)
tl.store(out_ptr1 + x2, tmp102, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y1 = yindex // 4
y0 = yindex % 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y1), xmask & ymask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr1 + (x2 + 16 * y3), xmask & ymask)
tmp1 = y0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tmp8 = tmp7 + tmp6
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused_add_log_mean_mul_sum_6(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-10
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp0 * tmp5
tmp8 = tmp7 * tmp1
tmp9 = tmp8 + tmp3
tmp10 = tl_math.log(tmp9)
tmp11 = tmp7 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 * tmp1
tmp15 = tmp14 + tmp3
tmp16 = tl_math.log(tmp15)
tmp17 = tmp13 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 * tmp1
tmp21 = tmp20 + tmp3
tmp22 = tl_math.log(tmp21)
tmp23 = tmp19 * tmp22
tmp24 = tmp18 + tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 64.0
tmp29 = tmp27 / tmp28
tmp30 = 1.0
tmp31 = tmp29 * tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = torch.ops.aten.exponential.default(buf2)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_add_log_neg_1[grid(64)](buf1, buf4, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = buf4
del buf4
buf12 = buf2
del buf2
triton_poi_fused__softmax_add_log_neg_2[grid(256)](buf7, buf1, buf5,
buf6, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.int64)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_add_argmax_max_scatter_sub_3[grid(64)](buf7, buf8,
buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
torch.float32)
triton_poi_fused_clone_4[grid(16, 16)](buf8, buf7, buf10, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf8
buf11 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf10, (1, 64, 4), (0, 4, 1),
0), reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0),
out=buf11)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(256)](buf12, buf13, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf12
buf14 = empty_strided_cuda((), (), torch.float32)
buf15 = buf14
del buf14
triton_per_fused_add_log_mean_mul_sum_6[grid(1)](buf15, buf13, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del buf13
return reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 1, 16, 4), 0
), buf15, buf9, primals_1, primals_3, buf1, buf7, reinterpret_tensor(
buf10, (1, 4, 64), (256, 1, 4), 0), reinterpret_tensor(primals_4, (
1, 4, 4), (16, 1, 4), 0)
class GumbelQuantizeNew(nn.Module):
"""
Reference:
Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
https://arxiv.org/abs/1611.01144
"""
def __init__(self, hidden_channel, n_e, e_dim, kl_weight=1.0, temp_init
=1.0, straight_through=True):
super().__init__()
self.e_dim = e_dim
self.n_e = n_e
self.straight_through = straight_through
self.temperature = temp_init
self.kl_weight = kl_weight
self.proj = nn.Conv2d(hidden_channel, n_e, kernel_size=1)
self.embedding = nn.Embedding(n_e, e_dim)
def get_codebook(self):
return self.embedding.weight
def get_codebook_entry(self, indices, shape=None):
z_q = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_4 = self.embedding.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1], output[2]
| repo_name: PeikeLi/pytorch-vector-quantization | module_name: GumbelQuantize | synthetic: false | uuid: 17,823 | licenses: ["MIT"] | stars: 6 | sha: 48ce6a74ec56b9d8c11dde2cd35b055a925c3070 | repo_link: https://github.com/PeikeLi/pytorch-vector-quantization/tree/48ce6a74ec56b9d8c11dde2cd35b055a925c3070 |
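
As a reading aid for the GumbelQuantize record above: the scalar produced by the final reduction kernel matches the eager expression `kl_weight * torch.sum(code_prob * torch.log(code_prob * n_e + 1e-10), dim=1).mean()`, which (up to the 1e-10 stabiliser) is the KL divergence of the softmax code posterior q from a uniform prior over the n_e codebook entries, averaged over batch and spatial positions:

```latex
\mathcal{L}_{\mathrm{KL}}
  = \lambda_{\mathrm{KL}}\,\mathbb{E}_{b,h,w}\!\left[\sum_{i=1}^{n_e} q_i \log\!\bigl(q_i\, n_e + 10^{-10}\bigr)\right]
  \approx \lambda_{\mathrm{KL}}\,\mathbb{E}_{b,h,w}\!\left[\mathrm{KL}\bigl(q \,\|\, \mathcal{U}(n_e)\bigr)\right]
```
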
GeneratorLon
|
import torch
import torch.onnx
import torch.nn as nn
import torch.nn.functional as F
class GeneratorLon(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, tgt_lon_classes):
super(GeneratorLon, self).__init__()
self.proj = nn.Linear(d_model, 2, tgt_lon_classes)
def forward(self, x):
lon_pred = F.softmax(self.proj(x), dim=-1)
lon_pred = lon_pred[:, -1, :]
lon_pred = torch.squeeze(lon_pred)
return lon_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'tgt_lon_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf1, (4, 4, 2), (32, 2, 1), 24
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class GeneratorLonNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, tgt_lon_classes):
super(GeneratorLonNew, self).__init__()
self.proj = nn.Linear(d_model, 2, tgt_lon_classes)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| repo_name: PhilippeW83440/conv-social-pooling | module_name: GeneratorLon | synthetic: false | uuid: 17,824 | licenses: ["MIT"] | stars: 4 | sha: 93d3a08af8678c3309d75a9bfb37df500da5cc46 | repo_link: https://github.com/PhilippeW83440/conv-social-pooling/tree/93d3a08af8678c3309d75a9bfb37df500da5cc46 |
C3D
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
from torch.nn.init import *
class C3D(nn.Module):
"""
The C3D network.
"""
def __init__(self, num_classes, pretrained=False, path=None):
super(C3D, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
self.path = path
if pretrained:
self.__load_pretrained_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = self.pool5(x)
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
logits = self.fc8(x)
return logits
def __load_pretrained_weights(self):
"""Initialiaze network."""
corresp_name = {'features.0.weight': 'conv1.weight',
'features.0.bias': 'conv1.bias', 'features.3.weight':
'conv2.weight', 'features.3.bias': 'conv2.bias',
'features.6.weight': 'conv3a.weight', 'features.6.bias':
'conv3a.bias', 'features.8.weight': 'conv3b.weight',
'features.8.bias': 'conv3b.bias', 'features.11.weight':
'conv4a.weight', 'features.11.bias': 'conv4a.bias',
'features.13.weight': 'conv4b.weight', 'features.13.bias':
'conv4b.bias', 'features.16.weight': 'conv5a.weight',
'features.16.bias': 'conv5a.bias', 'features.18.weight':
'conv5b.weight', 'features.18.bias': 'conv5b.bias',
'classifier.0.weight': 'fc6.weight', 'classifier.0.bias':
'fc6.bias', 'classifier.3.weight': 'fc7.weight',
'classifier.3.bias': 'fc7.bias'}
p_dict = torch.load(self.path)
s_dict = self.state_dict()
for name in p_dict:
if name not in corresp_name:
continue
s_dict[corresp_name[name]] = p_dict[name]
self.load_state_dict(s_dict)
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
from torch.nn.init import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 8192 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 128 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_15, (512,), (1,))
assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (4096, 8192), (8192, 1))
assert_size_stride(primals_19, (4096,), (1,))
assert_size_stride(primals_20, (4096, 4096), (4096, 1))
assert_size_stride(primals_21, (4096,), (1,))
assert_size_stride(primals_22, (4, 4096), (4096, 1))
assert_size_stride(primals_23, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144,
4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2,
67108864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2,
2], [1, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536,
1024, 32, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_1[grid(33554432)](buf6, primals_5,
33554432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2,
2], [2, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1
), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 32, 16, 16), (2097152, 8192, 256,
16, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_2[grid(8388608)](buf11, primals_7,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 32, 16, 16), (2097152, 8192, 256,
16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_2[grid(8388608)](buf13, primals_9,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2,
2], [2, 2, 2])
buf15 = buf14[0]
buf16 = buf14[1]
del buf14
buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_3[grid(2097152)](buf18,
primals_11, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1))
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_3[grid(2097152)](buf20,
primals_13, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2,
2], [2, 2, 2])
buf22 = buf21[0]
buf23 = buf21[1]
del buf21
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_4[grid(262144)](buf25, primals_15,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_4[grid(262144)](buf27, primals_17,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2,
2], [2, 2, 2], [0, 1, 1])
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf29, (9, 8192), (8192, 1), 0
), reinterpret_tensor(primals_18, (8192, 4096), (1, 8192), 0),
out=buf31)
buf32 = buf31
del buf31
triton_poi_fused_relu_5[grid(36864)](buf32, primals_19, 36864,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf33 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (4096, 4096
), (1, 4096), 0), out=buf33)
buf34 = buf33
del buf33
triton_poi_fused_relu_5[grid(36864)](buf34, primals_21, 36864,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_21
buf35 = empty_strided_cuda((9, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_23, buf34, reinterpret_tensor(
primals_22, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf35)
del primals_23
return (buf35, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4,
buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22,
buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (9, 8192), (
8192, 1), 0), buf32, buf34, primals_22, primals_20, primals_18)
class C3DNew(nn.Module):
"""
The C3D network.
"""
def __init__(self, num_classes, pretrained=False, path=None):
super(C3DNew, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
self.path = path
if pretrained:
self.__load_pretrained_weights()
def __load_pretrained_weights(self):
"""Initialiaze network."""
corresp_name = {'features.0.weight': 'conv1.weight',
'features.0.bias': 'conv1.bias', 'features.3.weight':
'conv2.weight', 'features.3.bias': 'conv2.bias',
'features.6.weight': 'conv3a.weight', 'features.6.bias':
'conv3a.bias', 'features.8.weight': 'conv3b.weight',
'features.8.bias': 'conv3b.bias', 'features.11.weight':
'conv4a.weight', 'features.11.bias': 'conv4a.bias',
'features.13.weight': 'conv4b.weight', 'features.13.bias':
'conv4b.bias', 'features.16.weight': 'conv5a.weight',
'features.16.bias': 'conv5a.bias', 'features.18.weight':
'conv5b.weight', 'features.18.bias': 'conv5b.bias',
'classifier.0.weight': 'fc6.weight', 'classifier.0.bias':
'fc6.bias', 'classifier.3.weight': 'fc7.weight',
'classifier.3.bias': 'fc7.bias'}
p_dict = torch.load(self.path)
s_dict = self.state_dict()
for name in p_dict:
if name not in corresp_name:
continue
s_dict[corresp_name[name]] = p_dict[name]
self.load_state_dict(s_dict)
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3a.weight
primals_7 = self.conv3a.bias
primals_8 = self.conv3b.weight
primals_9 = self.conv3b.bias
primals_10 = self.conv4a.weight
primals_11 = self.conv4a.bias
primals_12 = self.conv4b.weight
primals_13 = self.conv4b.bias
primals_14 = self.conv5a.weight
primals_15 = self.conv5a.bias
primals_16 = self.conv5b.weight
primals_17 = self.conv5b.bias
primals_18 = self.fc6.weight
primals_19 = self.fc6.bias
primals_20 = self.fc7.weight
primals_21 = self.fc7.bias
primals_22 = self.fc8.weight
primals_23 = self.fc8.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0]
| repo_name: Luoyadan/MM2020_ABG | module_name: C3D | synthetic: false | uuid: 17,825 | licenses: ["MIT"] | stars: 8 | sha: d74cf915deea7bb425518f5bd40e64a9a7341981 | repo_link: https://github.com/Luoyadan/MM2020_ABG/tree/d74cf915deea7bb425518f5bd40e64a9a7341981 |
GluMlp
|
import torch
import torch.nn as nn
class GluMlp(nn.Module):
""" MLP w/ GLU style gating
See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.Sigmoid, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert hidden_features % 2 == 0
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features // 2, out_features)
self.drop = nn.Dropout(drop)
def init_weights(self):
fc1_mid = self.fc1.bias.shape[0] // 2
nn.init.ones_(self.fc1.bias[fc1_mid:])
nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-06)
def forward(self, x):
x = self.fc1(x)
x, gates = x.chunk(2, dim=-1)
x = x * self.act(gates)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp2 * tmp1
tl.store(out_ptr0 + x2, tmp1, xmask)
tl.store(out_ptr1 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(128)](buf0, buf1, buf2, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 2), (64, 16, 4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), primals_4
class GluMlpNew(nn.Module):
""" MLP w/ GLU style gating
See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.Sigmoid, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert hidden_features % 2 == 0
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features // 2, out_features)
self.drop = nn.Dropout(drop)
def init_weights(self):
fc1_mid = self.fc1.bias.shape[0] // 2
nn.init.ones_(self.fc1.bias[fc1_mid:])
nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-06)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| repo_name: RICE-EIC/Patch-Fool | module_name: GluMlp | synthetic: false | uuid: 17,826 | licenses: ["MIT"] | stars: 7 | sha: 9638ec33a4d13b0c5ff0ec3ee5ce6b46ea7da5a6 | repo_link: https://github.com/RICE-EIC/Patch-Fool/tree/9638ec33a4d13b0c5ff0ec3ee5ce6b46ea7da5a6 |
AffinityLoss
|
import torch
from torch import Tensor
import torch.nn as nn
class AffinityLoss(nn.Module):
"""
GNINA affinity loss.
Parameters
----------
reduction: str
Reduction method (mean or sum)
delta: float
Scaling factor
penalty: float
Penalty factor
pseudo_huber: bool
Use pseudo-huber loss as opposed to L2 loss
scale: float
Scaling factor for the loss
Notes
-----
Translated from the original custom Caffe layer. Not all functionality is
implemented.
https://github.com/gnina/gnina/blob/master/caffe/src/caffe/layers/affinity_loss_layer.cpp
The :code:`scale` parameter is different from the original implementation. In the
original Caffe implementation, the :code:`scale` parameter is used to scale the
gradients in the backward pass. Here the scale parameter scales the loss function
directly in the forward pass.
Definition of pseudo-Huber loss:
https://en.wikipedia.org/wiki/Huber_loss#Pseudo-Huber_loss_function
"""
def __init__(self, reduction: 'str'='mean', delta: 'float'=1.0, penalty:
'float'=0.0, pseudo_huber: 'bool'=False, scale: 'float'=1.0):
super().__init__()
self.delta: 'float' = delta
self.delta2: 'float' = delta * delta
self.penalty: 'float' = penalty
self.pseudo_huber: 'bool' = pseudo_huber
self.scale: 'float' = scale
assert reduction in ['mean', 'sum']
self.reduction: 'str' = reduction
def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor:
"""
Parameters
----------
input: Tensor
Predicted values
target: Tensor
Target values
Notes
-----
Binding affinity (pK) is positive for good poses and negative for bad poses (and
zero if unknown). This allows to distinguish good poses from bad poses (to
which a penalty is applied) without explicitly using the labels or the RMSD.
"""
assert input.size() == target.size()
diff = torch.where(target > 0, input - target, torch.zeros_like(input))
diff = torch.where(torch.logical_and(target < 0, target > -input),
input + target + self.penalty, diff)
if self.pseudo_huber:
scaled_diff = diff / self.delta
loss = self.delta2 * (torch.sqrt(1.0 + scaled_diff *
scaled_diff) - 1.0)
else:
loss = diff * diff
if self.reduction == 'mean':
reduced_loss = torch.mean(loss)
else:
reduced_loss = torch.sum(loss)
return self.scale * reduced_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_gt_logical_and_lt_mean_mul_neg_sub_where_zeros_like_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = tmp0 < tmp1
tmp4 = -tmp3
tmp5 = tmp0 > tmp4
tmp6 = tmp2 & tmp5
tmp7 = tmp3 + tmp0
tmp8 = tmp7 + tmp1
tmp9 = tmp0 > tmp1
tmp10 = tmp3 - tmp0
tmp11 = tl.where(tmp9, tmp10, tmp1)
tmp12 = tl.where(tmp6, tmp8, tmp11)
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_gt_logical_and_lt_mean_mul_neg_sub_where_zeros_like_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AffinityLossNew(nn.Module):
"""
GNINA affinity loss.
Parameters
----------
reduction: str
Reduction method (mean or sum)
delta: float
Scaling factor
penalty: float
Penalty factor
pseudo_huber: bool
Use pseudo-huber loss as opposed to L2 loss
scale: float
Scaling factor for the loss
Notes
-----
Translated from the original custom Caffe layer. Not all functionality is
implemented.
https://github.com/gnina/gnina/blob/master/caffe/src/caffe/layers/affinity_loss_layer.cpp
The :code:`scale` parameter is different from the original implementation. In the
original Caffe implementation, the :code:`scale` parameter is used to scale the
gradients in the backward pass. Here the scale parameter scales the loss function
directly in the forward pass.
Definition of pseudo-Huber loss:
https://en.wikipedia.org/wiki/Huber_loss#Pseudo-Huber_loss_function
"""
def __init__(self, reduction: 'str'='mean', delta: 'float'=1.0, penalty:
'float'=0.0, pseudo_huber: 'bool'=False, scale: 'float'=1.0):
super().__init__()
self.delta: 'float' = delta
self.delta2: 'float' = delta * delta
self.penalty: 'float' = penalty
self.pseudo_huber: 'bool' = pseudo_huber
self.scale: 'float' = scale
assert reduction in ['mean', 'sum']
self.reduction: 'str' = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| repo_name: RMeli/gnina-torch | module_name: AffinityLoss | synthetic: false | uuid: 17,827 | licenses: ["MIT"] | stars: 5 | sha: eb57e2a62628d39f2a66e7fa1748e80705366761 | repo_link: https://github.com/RMeli/gnina-torch/tree/eb57e2a62628d39f2a66e7fa1748e80705366761 |
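
For reference, the pseudo-Huber branch in the AffinityLoss record above, `self.delta2 * (torch.sqrt(1.0 + scaled_diff * scaled_diff) - 1.0)`, is the standard pseudo-Huber loss applied to the masked difference d with scale delta:

```latex
L_{\delta}(d) = \delta^{2}\left(\sqrt{1 + (d/\delta)^{2}} - 1\right)
```
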
FingerprintDecoder
|
import torch
import torch.utils.data
import torch.nn.functional as F
class FingerprintDecoder(torch.nn.Module):
def __init__(self, n_in, n_out, dropout=0.1):
super(FingerprintDecoder, self).__init__()
if n_out > n_in:
n_hidden = n_out // 2
else:
n_hidden = n_in // 2
self.fc1 = torch.nn.Linear(n_in, n_hidden)
self.fc2 = torch.nn.Linear(n_hidden, n_out)
self.dropout = dropout
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.dropout(x, self.dropout, training=self.training)
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1,
primals_2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4, buf3
class FingerprintDecoderNew(torch.nn.Module):
def __init__(self, n_in, n_out, dropout=0.1):
super(FingerprintDecoderNew, self).__init__()
if n_out > n_in:
n_hidden = n_out // 2
else:
n_hidden = n_in // 2
self.fc1 = torch.nn.Linear(n_in, n_hidden)
self.fc2 = torch.nn.Linear(n_hidden, n_out)
self.dropout = dropout
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Prepaire/MolGNN_fewshot
|
FingerprintDecoder
| false
| 17,828
|
[
"MIT"
] | 6
|
c7c17afdeae7f2ef0c8e3ca2da033091ec7537ca
|
https://github.com/Prepaire/MolGNN_fewshot/tree/c7c17afdeae7f2ef0c8e3ca2da033091ec7537ca
|
CustomGruCell
|
import torch
import numpy as np
import torch.nn as nn
class CustomGruCell(nn.Module):
"""
A forward only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(CustomGruCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, h):
i_r = self.fc_ir(x)
h_r = self.fc_hr(h)
i_z = self.fc_iz(x)
h_z = self.fc_hz(h)
i_n = self.fc_in(x)
h_n = self.fc_hn(h)
resetgate = (i_r + h_r).sigmoid()
inputgate = (i_z + h_z).sigmoid()
newgate = (i_n + resetgate * h_n).tanh()
hy = newgate + inputgate * (h - newgate)
return hy
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, xmask)
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + x2, xmask)
tmp17 = tl.load(in_ptr7 + x2, xmask)
tmp21 = tl.load(in_ptr8 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp7 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp22 = tmp21 - tmp20
tmp23 = tmp15 * tmp22
tmp24 = tmp20 + tmp23
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(in_out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr0 + x2, tmp24, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
del primals_9
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf4)
del primals_11
del primals_12
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf5)
del primals_13
del primals_14
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(256)](buf6, buf7,
primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4,
buf5, primals_6, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del primals_10
del primals_2
del primals_5
del primals_8
return buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, buf5, buf6, buf7
class CustomGruCellNew(nn.Module):
"""
A forward only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(CustomGruCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, input_0, input_1):
primals_1 = self.fc_ir.weight
primals_2 = self.fc_ir.bias
primals_4 = self.fc_hr.weight
primals_5 = self.fc_hr.bias
primals_7 = self.fc_iz.weight
primals_8 = self.fc_iz.bias
primals_9 = self.fc_hz.weight
primals_10 = self.fc_hz.bias
primals_11 = self.fc_in.weight
primals_12 = self.fc_in.bias
primals_13 = self.fc_hn.weight
primals_14 = self.fc_hn.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
|
Rahul-160/PySyft
|
CustomGruCell
| false
| 17,829
|
[
"Apache-2.0"
] | 7
|
182627db2369d6f93aa0667f5ea2abee5b878d58
|
https://github.com/Rahul-160/PySyft/tree/182627db2369d6f93aa0667f5ea2abee5b878d58
|
GeneratorLat
|
import torch
import torch.onnx
import torch.nn as nn
import torch.nn.functional as F
class GeneratorLat(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, tgt_lat_classes):
super(GeneratorLat, self).__init__()
self.proj = nn.Linear(d_model, tgt_lat_classes)
def forward(self, x):
lat_pred = F.softmax(self.proj(x), dim=-1)
lat_pred = lat_pred[:, -1, :]
lat_pred = torch.squeeze(lat_pred)
return lat_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'tgt_lat_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4), (64, 4, 1), 48
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class GeneratorLatNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, tgt_lat_classes):
super(GeneratorLatNew, self).__init__()
self.proj = nn.Linear(d_model, tgt_lat_classes)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
PhilippeW83440/conv-social-pooling
|
GeneratorLat
| false
| 17,830
|
[
"MIT"
] | 4
|
93d3a08af8678c3309d75a9bfb37df500da5cc46
|
https://github.com/PhilippeW83440/conv-social-pooling/tree/93d3a08af8678c3309d75a9bfb37df500da5cc46
|
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, input_dim, output_dim):
super(Actor, self).__init__()
self.fc1 = nn.Linear(input_dim, 128)
self.fc2 = nn.Linear(128, output_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf4, primals_4, buf5
class ActorNew(nn.Module):
def __init__(self, input_dim, output_dim):
super(ActorNew, self).__init__()
self.fc1 = nn.Linear(input_dim, 128)
self.fc2 = nn.Linear(128, output_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
PaulPan00/donkey_wrapper
|
Actor
| false
| 17,831
|
[
"MIT"
] | 6
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
https://github.com/PaulPan00/donkey_wrapper/tree/a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
RevPaddingLayer
|
import torch
import torch.nn as nn
class RevPaddingLayer(nn.Module):
def __init__(self, stride):
super().__init__()
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
def forward(self, x):
x = self.pool(x)
zeros = torch.zeros_like(x)
zeros_left, zeros_right = zeros.chunk(2, dim=1)
y = torch.cat([zeros_left, x, zeros_right], dim=1)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'stride': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x6 = xindex
x3 = xindex // 64
x7 = xindex % 64
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x6), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x0 + -1 * x1 + x0 * x1 + (5 * (5 <= 2 + x0) + (2 + x0) *
(2 + x0 < 5)) * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)
) + -1 * x0 * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)
) + -1 * x1 * (5 * (5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) + (5 *
(5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) + (5 * (5 <= 2 + x1) + (2 +
x1) * (2 + x1 < 5))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + (x7 + 128 * x3), tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
tmp0 = 0.0
tl.store(out_ptr0 + (x0 + 128 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 4, 4), (128, 16, 4, 1), 32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = reinterpret_tensor(buf3, (4, 2, 4, 4), (128, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(128)](buf1, 128, XBLOCK=128, num_warps=
4, num_stages=1)
buf2 = reinterpret_tensor(buf3, (4, 2, 4, 4), (128, 16, 4, 1), 96)
triton_poi_fused_cat_1[grid(128)](buf2, 128, XBLOCK=128, num_warps=
4, num_stages=1)
return buf3,
class RevPaddingLayerNew(nn.Module):
def __init__(self, stride):
super().__init__()
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
RKorzeniowski/BigBiGAN-PyTorch
|
RevPaddingLayer
| false
| 17,832
|
[
"MIT"
] | 5
|
caaaf69b094ae45e9fa3608577fde32dafa1f16e
|
https://github.com/RKorzeniowski/BigBiGAN-PyTorch/tree/caaaf69b094ae45e9fa3608577fde32dafa1f16e
|
AvgPool2d
|
from torch.nn import Module
import torch
import torch as th
class AvgPool2d(Module):
"""
This class is the beginning of an exact python port of the torch.nn.AvgPool2d
module. Because PySyft cannot hook into layers which are implemented in C++,
our special functionalities (such as encrypted computation) do not work with
torch.nn.AvgPool2d and so we must have python ports available for all layer types
which we seek to use.
Note that this module has been tested to ensure that it outputs the exact output
values that the main module outputs in the same order that the main module does.
However, there is often some rounding error of unknown origin, usually less than
1e-6 in magnitude.
This module has not yet been tested with GPUs but should work out of the box.
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
"""For information on the constructor arguments, please see PyTorch's
documentation in torch.nn.AvgPool2d"""
super().__init__()
assert padding == 0
assert ceil_mode is False
assert count_include_pad is True
assert divisor_override is None
if stride is None:
stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size)
def forward(self, data):
batch_size, out_channels, rows, cols = data.shape
kernel_results = []
for i in range(0, rows - self.kernel_size + 1, self.stride):
for j in range(0, cols - self.kernel_size + 1, self.stride):
kernel_out = data[:, :, i:i + self.kernel_size, j:j + self.
kernel_size].sum((2, 3)) * self._one_over_kernel_size
kernel_results.append(kernel_out.unsqueeze(2))
pred = th.cat(kernel_results, axis=2).view(batch_size, out_channels,
int(rows / self.stride), int(cols / self.stride))
return pred
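# Usage sketch (illustrative, not from the original source): the port can be
# cross-checked against the built-in layer, e.g.
#     x = torch.rand(2, 3, 8, 8)
#     ref = th.nn.AvgPool2d(kernel_size=4)(x)
#     out = AvgPool2d(kernel_size=4)(x)
#     assert th.allclose(ref, out, atol=1e-6)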
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 0.0625
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0),
class AvgPool2dNew(Module):
"""
This class is the beginning of an exact python port of the torch.nn.AvgPool2d
module. Because PySyft cannot hook into layers which are implemented in C++,
our special functionalities (such as encrypted computation) do not work with
torch.nn.AvgPool2d and so we must have python ports available for all layer types
which we seek to use.
Note that this module has been tested to ensure that it outputs the exact output
values that the main module outputs in the same order that the main module does.
However, there is often some rounding error of unknown origin, usually less than
1e-6 in magnitude.
This module has not yet been tested with GPUs but should work out of the box.
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
"""For information on the constructor arguments, please see PyTorch's
documentation in torch.nn.AvgPool2d"""
super().__init__()
assert padding == 0
assert ceil_mode is False
assert count_include_pad is True
assert divisor_override is None
if stride is None:
stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Rahul-160/PySyft
|
AvgPool2d
| false
| 17,833
|
[
"Apache-2.0"
] | 7
|
182627db2369d6f93aa0667f5ea2abee5b878d58
|
https://github.com/Rahul-160/PySyft/tree/182627db2369d6f93aa0667f5ea2abee5b878d58
|
myEncoder
|
import torch
import torch.nn.functional as F
class myEncoder(torch.nn.Module):
def __init__(self, fomSize, romSize):
super(myEncoder, self).__init__()
self.fc1 = torch.nn.Linear(fomSize, 200)
self.fc2 = torch.nn.Linear(200, 64)
self.fc3 = torch.nn.Linear(64, romSize)
def forward(self, x):
x = self.fc1(x)
x = F.elu(x)
x = self.fc2(x)
x = F.elu(x)
x = self.fc3(x)
x = F.elu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fomSize': 4, 'romSize': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_elu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 200), (200, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(12800)](buf0, buf1, 12800, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200),
(200, 1), 0), reinterpret_tensor(primals_4, (200, 64), (1, 200),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.
float32)
triton_poi_fused_elu_1[grid(4096)](buf2, buf3, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_elu_2[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 200), (200, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 64), (64, 1), 0
), buf4, primals_6, primals_4
class myEncoderNew(torch.nn.Module):
def __init__(self, fomSize, romSize):
super(myEncoderNew, self).__init__()
self.fc1 = torch.nn.Linear(fomSize, 200)
self.fc2 = torch.nn.Linear(200, 64)
self.fc3 = torch.nn.Linear(64, romSize)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Pressio/pressio4py
|
myEncoder
| false
| 17,834
|
[
"Unlicense",
"BSD-3-Clause"
] | 4
|
36676dbd112a7c7960ccbf302ff14d4376c819ec
|
https://github.com/Pressio/pressio4py/tree/36676dbd112a7c7960ccbf302ff14d4376c819ec
|
Foo
|
import torch
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed
class Foo(torch.nn.Module):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
def forward(self, input):
return self.n * input + self.m
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class FooNew(torch.nn.Module):
def __init__(self, size):
super(FooNew, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
def forward(self, input_0):
primals_1 = self.n
primals_3 = self.m
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ROCmSoftwarePlatform/apex
|
Foo
| false
| 17,835
|
[
"BSD-3-Clause"
] | 6
|
db92ee13ca55e284342bdca84bddc38c3812f1ed
|
https://github.com/ROCmSoftwarePlatform/apex/tree/db92ee13ca55e284342bdca84bddc38c3812f1ed
|
FermiDiracDecoder
|
from torch.nn import Module
import torch
from torch.nn.modules.module import Module
import torch.optim
import torch.nn.modules.loss
class FermiDiracDecoder(Module):
"""Fermi Dirac to compute edge probabilities based on distances."""
def __init__(self, r, t):
super(FermiDiracDecoder, self).__init__()
self.r = r
self.t = t
def forward(self, dist):
probs = 1.0 / (torch.exp((dist - self.r) / self.t) + 1.0)
return probs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'r': 4, 't': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn.modules.module import Module
import torch.optim
import torch.nn.modules.loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_exp_mul_reciprocal_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp8 / tmp7
tmp10 = tmp9 * tmp6
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_exp_mul_reciprocal_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class FermiDiracDecoderNew(Module):
"""Fermi Dirac to compute edge probabilities based on distances."""
def __init__(self, r, t):
super(FermiDiracDecoderNew, self).__init__()
self.r = r
self.t = t
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
RingBDStack/ACE-HGNN
|
FermiDiracDecoder
| false
| 17,836
|
[
"MIT"
] | 5
|
afc610dd838951dcd6c3910795b472566f0c23ca
|
https://github.com/RingBDStack/ACE-HGNN/tree/afc610dd838951dcd6c3910795b472566f0c23ca
|
Fusion2_GateLayer
|
import torch
from torch import nn
class Fusion2_GateLayer(nn.Module):
def __init__(self, input_dim):
super(Fusion2_GateLayer, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
self._norm_layer2 = nn.Linear(input_dim, 1)
def forward(self, input1, input2):
norm_input = self._norm_layer1(torch.cat([input1, input2], dim=-1))
gate = torch.sigmoid(self._norm_layer2(norm_input))
gated_emb = gate * input1 + (1 - gate) * input2
return gated_emb
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp6 = tl.load(in_ptr2 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf3, primals_1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf4, primals_1, primals_2, reinterpret_tensor(buf0, (64, 8), (8,
1), 0), buf1, buf3, primals_5
class Fusion2_GateLayerNew(nn.Module):
def __init__(self, input_dim):
super(Fusion2_GateLayerNew, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
self._norm_layer2 = nn.Linear(input_dim, 1)
def forward(self, input_0, input_1):
primals_3 = self._norm_layer1.weight
primals_4 = self._norm_layer1.bias
primals_5 = self._norm_layer2.weight
primals_6 = self._norm_layer2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
RUCAIBox/WSDM2022-C2CRS
|
Fusion2_GateLayer
| false
| 17,837
|
[
"MIT"
] | 4
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
https://github.com/RUCAIBox/WSDM2022-C2CRS/tree/8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
CrossEntropyLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
def _is_long(x):
    return isinstance(x, torch.LongTensor) or isinstance(x, torch.cuda.LongTensor)
def onehot(indexes, N=None, ignore_index=None):
"""
    Creates a one-hot representation of indexes with N possible entries.
    If N is not specified, it is inferred from the maximum index appearing.
    indexes is a long tensor of indexes.
    Entries equal to ignore_index are all-zero in the one-hot representation.
"""
if N is None:
N = indexes.max() + 1
sz = list(indexes.size())
output = indexes.new().byte().resize_(*sz, N).zero_()
output.scatter_(-1, indexes.unsqueeze(-1), 1)
if ignore_index is not None and ignore_index >= 0:
output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
return output
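# Example (illustrative): onehot(torch.tensor([0, 2]), N=3) returns the byte
# tensor [[1, 0, 0], [0, 0, 1]]; with ignore_index=2 the second row is zeroed.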
def smoothing(out, y, smooth_eps):
num_classes = out.shape[1]
if smooth_eps == 0:
return y
my = onehot(y, num_classes)
true_class, false_class = 1.0 - smooth_eps * num_classes / (num_classes - 1
), smooth_eps / (num_classes - 1)
my = my * true_class + torch.ones_like(my) * false_class
return my
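# Note on the arithmetic above: with C classes and eps = smooth_eps, the true
# class receives onehot * true_class + false_class = 1 - eps and every other
# class receives eps / (C - 1), so each smoothed row still sums to 1.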
def cross_entropy(logits, target, weight=None, ignore_index=-100, reduction
='mean', smooth_eps=0.0):
"""cross entropy loss with support for target distributions"""
with torch.no_grad():
if smooth_eps > 0:
target = smoothing(logits, target, smooth_eps)
if _is_long(target):
return F.cross_entropy(logits, target, weight, ignore_index=
ignore_index, reduction=reduction)
masked_indices = None
logits.size(-1)
if _is_long(target) and ignore_index >= 0:
masked_indices = target.eq(ignore_index)
lsm = F.log_softmax(logits, dim=-1)
if weight is not None:
lsm = lsm * weight.unsqueeze(0)
loss = -(target * lsm).sum(-1)
if masked_indices is not None:
loss.masked_fill_(masked_indices, 0)
if reduction == 'sum':
loss = loss.sum()
elif reduction == 'mean':
if masked_indices is None:
loss = loss.mean()
else:
loss = loss.sum() / float(loss.size(0) - masked_indices.sum())
return loss
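# For non-long (soft) targets, the function above computes the distribution
# cross-entropy loss_i = -sum_c target[i, c] * log_softmax(logits)[i, c],
# optionally reweighted per class via `weight` and reduced by 'mean' or 'sum'.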
class CrossEntropyLoss(nn.CrossEntropyLoss):
"""CrossEntropyLoss - with ability to recieve distrbution as targets and built-in label smoothing"""
def __init__(self, weight=None, ignore_index=-100, reduction='mean',
smooth_eps=0.0):
super(CrossEntropyLoss, self).__init__(weight=weight, ignore_index=
ignore_index, reduction=reduction)
self.smooth_eps = smooth_eps
def forward(self, input, target):
return cross_entropy(input, target, self.weight, self.ignore_index,
self.reduction, self.smooth_eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = 64.0
tmp32 = tmp30 / tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2,
arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
def _is_long(x):
    return isinstance(x, torch.LongTensor) or isinstance(x, torch.cuda.LongTensor)
def onehot(indexes, N=None, ignore_index=None):
"""
    Creates a one-hot representation of indexes with N possible entries.
    If N is not specified, it is inferred from the maximum index appearing.
    indexes is a long tensor of indexes.
    Entries equal to ignore_index are all-zero in the one-hot representation.
"""
if N is None:
N = indexes.max() + 1
sz = list(indexes.size())
output = indexes.new().byte().resize_(*sz, N).zero_()
output.scatter_(-1, indexes.unsqueeze(-1), 1)
if ignore_index is not None and ignore_index >= 0:
output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
return output
def smoothing(out, y, smooth_eps):
num_classes = out.shape[1]
if smooth_eps == 0:
return y
my = onehot(y, num_classes)
true_class, false_class = 1.0 - smooth_eps * num_classes / (num_classes - 1
), smooth_eps / (num_classes - 1)
my = my * true_class + torch.ones_like(my) * false_class
return my
def cross_entropy(logits, target, weight=None, ignore_index=-100, reduction
='mean', smooth_eps=0.0):
"""cross entropy loss with support for target distributions"""
with torch.no_grad():
if smooth_eps > 0:
target = smoothing(logits, target, smooth_eps)
if _is_long(target):
return F.cross_entropy(logits, target, weight, ignore_index=
ignore_index, reduction=reduction)
masked_indices = None
logits.size(-1)
if _is_long(target) and ignore_index >= 0:
masked_indices = target.eq(ignore_index)
lsm = F.log_softmax(logits, dim=-1)
if weight is not None:
lsm = lsm * weight.unsqueeze(0)
loss = -(target * lsm).sum(-1)
if masked_indices is not None:
loss.masked_fill_(masked_indices, 0)
if reduction == 'sum':
loss = loss.sum()
elif reduction == 'mean':
if masked_indices is None:
loss = loss.mean()
else:
loss = loss.sum() / float(loss.size(0) - masked_indices.sum())
return loss
class CrossEntropyLossNew(nn.CrossEntropyLoss):
"""CrossEntropyLoss - with ability to recieve distrbution as targets and built-in label smoothing"""
def __init__(self, weight=None, ignore_index=-100, reduction='mean',
smooth_eps=0.0):
super(CrossEntropyLossNew, self).__init__(weight=weight,
ignore_index=ignore_index, reduction=reduction)
self.smooth_eps = smooth_eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Randl/Ranger_Mish_reimplementation
|
CrossEntropyLoss
| false
| 17,838
|
[
"MIT"
] | 7
|
36f580ce8a02fae1929e101c9bd6987ccd2a5843
|
https://github.com/Randl/Ranger_Mish_reimplementation/tree/36f580ce8a02fae1929e101c9bd6987ccd2a5843
|
BasicBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=0, bias=True)
class BasicBlock(nn.Module):
"""
Residual BasicBlock
"""
def __init__(self, inplanes, planes, stride=1, weightnorm=None,
shortcut=True):
super(BasicBlock, self).__init__()
self.shortcut = shortcut
self.conv1 = conv3x3(inplanes, planes, stride)
self.relu1 = nn.PReLU(num_parameters=planes, init=0.1)
self.relu2 = nn.PReLU(num_parameters=planes, init=0.1)
self.conv2 = conv3x3(inplanes, planes, stride)
if weightnorm:
self.conv1 = weight_norm(self.conv1)
self.conv2 = weight_norm(self.conv2)
def forward(self, x):
out = self.relu1(x)
out = F.pad(out, (1, 1, 1, 1), 'reflect')
out = self.conv1(out)
out = out[:, :, :x.shape[2], :x.shape[3]]
out = self.relu2(out)
out = F.pad(out, (1, 1, 1, 1), 'reflect')
out = self.conv2(out)
out = out[:, :, :x.shape[2], :x.shape[3]]
if self.shortcut:
out = x + out
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__prelu_kernel_reflection_pad2d_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = tmp3 * tmp0
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x5, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_reflection_pad2d_0[grid(576)](primals_2,
primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused__prelu_kernel_reflection_pad2d_0[grid(576)](buf2,
primals_5, buf3, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_2[grid(256)](buf5, primals_2,
primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_2, primals_3, primals_5, primals_6, buf0, buf2, buf3
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=0, bias=True)
class BasicBlockNew(nn.Module):
"""
Residual BasicBlock
"""
def __init__(self, inplanes, planes, stride=1, weightnorm=None,
shortcut=True):
super(BasicBlockNew, self).__init__()
self.shortcut = shortcut
self.conv1 = conv3x3(inplanes, planes, stride)
self.relu1 = nn.PReLU(num_parameters=planes, init=0.1)
self.relu2 = nn.PReLU(num_parameters=planes, init=0.1)
self.conv2 = conv3x3(inplanes, planes, stride)
if weightnorm:
self.conv1 = weight_norm(self.conv1)
self.conv2 = weight_norm(self.conv2)
def forward(self, input_0):
primals_3 = self.conv1.weight
primals_1 = self.conv1.bias
primals_4 = self.relu1.weight
primals_5 = self.relu2.weight
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
RaoUmer/ISRResCNet
|
BasicBlock
| false
| 17,839
|
[
"MIT"
] | 6
|
8175bb9efa5bba2cce4ad86616219209c20b7244
|
https://github.com/RaoUmer/ISRResCNet/tree/8175bb9efa5bba2cce4ad86616219209c20b7244
|
HiResPose
|
import torch
import torch.nn as nn
from collections import OrderedDict
from typing import Tuple
import torch.nn.functional as F
class HiResPose(nn.Module):
"""
GNINA HiResPose model architecture.
Parameters
----------
input_dims: tuple
Model input dimensions (channels, depth, height, width)
Notes
-----
This architecture was translated from the following Caffe model:
https://github.com/gnina/models/blob/master/crossdocked_paper/hires_pose.model
    The main difference is that the PyTorch implementation returns the log softmax.
This model is implemented only for multi-task pose and affinity prediction.
"""
def __init__(self, input_dims: 'Tuple'):
super().__init__()
self.input_dims = input_dims
self.features = nn.Sequential(OrderedDict([('unit1_conv', nn.Conv3d
(in_channels=input_dims[0], out_channels=32, kernel_size=3,
stride=1, padding=1)), ('unit1_func', nn.ReLU()), ('unit2_pool',
nn.MaxPool3d(kernel_size=2, stride=2)), ('unit2_conv', nn.
Conv3d(in_channels=32, out_channels=64, kernel_size=3, stride=1,
padding=1)), ('unit2_func', nn.ReLU()), ('unit3_pool', nn.
MaxPool3d(kernel_size=2, stride=2)), ('unit3_conv', nn.Conv3d(
in_channels=64, out_channels=128, kernel_size=3, stride=1,
padding=1)), ('unit3_func', nn.ReLU())]))
self.features_out_size = input_dims[1] // 4 * input_dims[2
] // 4 * input_dims[3] // 4 * 128
self.pose = nn.Sequential(OrderedDict([('pose_output', nn.Linear(
in_features=self.features_out_size, out_features=2))]))
self.affinity = nn.Sequential(OrderedDict([('affinity_output', nn.
Linear(in_features=self.features_out_size, out_features=1))]))
def forward(self, x: 'torch.Tensor'):
"""
Parameters
----------
x: torch.Tensor
Input tensor
Notes
-----
The pose score is the log softmax of the output of the last linear layer.
"""
x = self.features(x)
x = x.view(-1, self.features_out_size)
pose_raw = self.pose(x)
pose_log = F.log_softmax(pose_raw, dim=1)
affinity = self.affinity(x)
return pose_log, affinity.squeeze(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dims': [4, 4, 4, 4]}]
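# Illustrative usage sketch; the helper name and the explicit batch dimension are
# assumptions, not part of the original record. It builds HiResPose with
# input_dims = (channels, depth, height, width) and runs a batched forward pass,
# returning per-example pose log-probabilities and a scalar affinity prediction.
def _example_hirespose_usage():
    model = HiResPose(input_dims=(4, 4, 4, 4))
    x = torch.rand(2, 4, 4, 4, 4)  # (batch, channels, depth, height, width)
    pose_log, affinity = model(x)
    assert pose_log.shape == (2, 2) and affinity.shape == (2,)
    return pose_log, affinity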
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from collections import OrderedDict
from typing import Tuple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool3d_with_indices_1(in_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = yindex // 2
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 2 * x2 + 8 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 2
y0 = yindex % 2
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 2 * x2 + 8 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl_math.log(tmp8)
tmp10 = tmp4 - tmp9
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (2, 128), (128, 1))
assert_size_stride(primals_9, (2,), (1,))
assert_size_stride(primals_10, (1, 128), (128, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 32, 4, 4, 4), (2048, 64, 16, 4, 1))
buf1 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(128, 16)](buf0, primals_2, buf1, 128,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2,
2], [2, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = empty_strided_cuda((32, 2, 2, 2), (8, 1, 4, 2), torch.int64)
triton_poi_fused_max_pool3d_with_indices_1[grid(64, 4)](buf4, buf5,
64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del buf4
buf6 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 32,
2, 2, 2), (256, 8, 4, 2, 1), 0), primals_4, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf6, (1, 64, 2, 2, 2), (512, 8, 4, 2, 1))
buf7 = empty_strided_cuda((64, 2, 2, 2), (8, 1, 4, 2), torch.float32)
triton_poi_fused_relu_2[grid(128, 4)](buf6, primals_5, buf7, 128, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del buf6
del primals_5
buf8 = torch.ops.aten.max_pool3d_with_indices.default(buf7, [2, 2,
2], [2, 2, 2])
buf9 = buf8[0]
buf10 = buf8[1]
del buf8
buf11 = extern_kernels.convolution(reinterpret_tensor(buf9, (1, 64,
1, 1, 1), (64, 1, 1, 1, 1), 0), primals_6, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf11, (1, 128, 1, 1, 1), (128, 1, 1, 1, 1))
buf12 = reinterpret_tensor(buf11, (128, 1, 1, 1), (1, 128, 128, 128), 0
)
del buf11
buf19 = empty_strided_cuda((128, 1, 1, 1), (1, 1, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(128)](buf12,
primals_7, buf19, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (1, 128),
(0, 1), 0), reinterpret_tensor(primals_8, (128, 2), (1, 128), 0
), alpha=1, beta=1, out=buf13)
del primals_9
buf16 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
triton_per_fused__log_softmax_4[grid(1)](buf13, buf16, 1, 2, XBLOCK
=1, num_warps=2, num_stages=1)
del buf13
buf18 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (1, 128),
(0, 1), 0), reinterpret_tensor(primals_10, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf18)
del primals_11
return buf16, reinterpret_tensor(buf18, (1,), (1,), 0
), primals_1, primals_4, primals_6, reinterpret_tensor(primals_3, (
1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), buf1, buf5, reinterpret_tensor(buf3, (1, 32, 2, 2, 2), (256, 8,
4, 2, 1), 0), buf7, buf10, reinterpret_tensor(buf9, (1, 64, 1, 1, 1
), (64, 1, 1, 1, 1), 0), reinterpret_tensor(buf12, (1, 128), (128,
1), 0), buf16, primals_10, primals_8, buf19
class HiResPoseNew(nn.Module):
"""
GNINA HiResPose model architecture.
Parameters
----------
input_dims: tuple
Model input dimensions (channels, depth, height, width)
Notes
-----
This architecture was translated from the following Caffe model:
https://github.com/gnina/models/blob/master/crossdocked_paper/hires_pose.model
    The main difference is that the PyTorch implementation returns the log softmax.
This model is implemented only for multi-task pose and affinity prediction.
"""
def __init__(self, input_dims: 'Tuple'):
super().__init__()
self.input_dims = input_dims
self.features = nn.Sequential(OrderedDict([('unit1_conv', nn.Conv3d
(in_channels=input_dims[0], out_channels=32, kernel_size=3,
stride=1, padding=1)), ('unit1_func', nn.ReLU()), ('unit2_pool',
nn.MaxPool3d(kernel_size=2, stride=2)), ('unit2_conv', nn.
Conv3d(in_channels=32, out_channels=64, kernel_size=3, stride=1,
padding=1)), ('unit2_func', nn.ReLU()), ('unit3_pool', nn.
MaxPool3d(kernel_size=2, stride=2)), ('unit3_conv', nn.Conv3d(
in_channels=64, out_channels=128, kernel_size=3, stride=1,
padding=1)), ('unit3_func', nn.ReLU())]))
self.features_out_size = input_dims[1] // 4 * input_dims[2
] // 4 * input_dims[3] // 4 * 128
self.pose = nn.Sequential(OrderedDict([('pose_output', nn.Linear(
in_features=self.features_out_size, out_features=2))]))
self.affinity = nn.Sequential(OrderedDict([('affinity_output', nn.
Linear(in_features=self.features_out_size, out_features=1))]))
def forward(self, input_0):
primals_1 = self.features.unit1_conv.weight
primals_2 = self.features.unit1_conv.bias
primals_4 = self.features.unit2_conv.weight
primals_5 = self.features.unit2_conv.bias
primals_6 = self.features.unit3_conv.weight
primals_7 = self.features.unit3_conv.bias
primals_8 = self.pose.pose_output.weight
primals_9 = self.pose.pose_output.bias
primals_10 = self.affinity.affinity_output.weight
primals_11 = self.affinity.affinity_output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
RMeli/gnina-torch
|
HiResPose
| false
| 17,840
|
[
"MIT"
] | 5
|
eb57e2a62628d39f2a66e7fa1748e80705366761
|
https://github.com/RMeli/gnina-torch/tree/eb57e2a62628d39f2a66e7fa1748e80705366761
|
GraphAttentionLayer
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.W = Parameter(torch.empty(size=(in_features, out_features)))
self.a = Parameter(torch.empty(size=(2 * out_features, 1)))
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.init_weight()
def init_weight(self):
nn.init.xavier_uniform_(self.W, gain=1.414)
nn.init.xavier_uniform_(self.a, gain=1.414)
def _prepare_attentional_mechanism_input(self, Wh):
N = Wh.shape[0]
Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)
Wh_repeated_alternating = Wh.repeat(N, 1)
all_combinations_matrix = torch.cat([Wh_repeated_in_chunks,
Wh_repeated_alternating], dim=1)
return all_combinations_matrix.view(N, N, 2 * self.out_features
), Wh_repeated_in_chunks, Wh_repeated_alternating
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
a_input, _Wh_repeated_in_chunks, _Wh_repeated_alternating = (self.
_prepare_attentional_mechanism_input(Wh))
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
return h_prime
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'dropout': 0.5,
'alpha': 4}]
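# Illustrative usage sketch; the helper name and the toy graph below are
# assumptions, not part of the original record. It runs the GAT layer on a fully
# connected 4-node graph; entries of adj that are > 0 select which pairwise
# attention scores survive the masking before the softmax.
def _example_gat_usage():
    layer = GraphAttentionLayer(in_features=4, out_features=4, dropout=0.5, alpha=0.2)
    h = torch.rand(4, 4)    # one feature row per node
    adj = torch.ones(4, 4)  # fully connected toy adjacency
    h_prime = layer(h, adj)
    assert h_prime.shape == (4, 4)
    return h_prime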
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(in_out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](primals_4, buf4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_2[grid(4)](buf4,
buf3, buf2, buf5, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf7,
buf4, buf3, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf5
del buf6
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf7, buf0, out=buf8)
return buf8, buf3, buf4, buf7, reinterpret_tensor(buf0, (4, 4), (1, 4), 0
), reinterpret_tensor(buf1, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_3, (1, 8), (1, 1), 0), reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0)
class GraphAttentionLayerNew(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha):
super(GraphAttentionLayerNew, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.W = Parameter(torch.empty(size=(in_features, out_features)))
self.a = Parameter(torch.empty(size=(2 * out_features, 1)))
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.init_weight()
def init_weight(self):
nn.init.xavier_uniform_(self.W, gain=1.414)
nn.init.xavier_uniform_(self.a, gain=1.414)
def _prepare_attentional_mechanism_input(self, Wh):
N = Wh.shape[0]
Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)
Wh_repeated_alternating = Wh.repeat(N, 1)
all_combinations_matrix = torch.cat([Wh_repeated_in_chunks,
Wh_repeated_alternating], dim=1)
return all_combinations_matrix.view(N, N, 2 * self.out_features
), Wh_repeated_in_chunks, Wh_repeated_alternating
def forward(self, input_0, input_1):
primals_1 = self.W
primals_3 = self.a
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
RidongHan/GHE-LPC
|
GraphAttentionLayer
| false
| 17,841
|
[
"MIT"
] | 4
|
2a10f423d747aa28560a3bcbf29f7ec87422beb8
|
https://github.com/RidongHan/GHE-LPC/tree/2a10f423d747aa28560a3bcbf29f7ec87422beb8
|
Fusion2_MinusFCLayer
|
import torch
from torch import nn
class Fusion2_MinusFCLayer(nn.Module):
def __init__(self, input_dim):
super(Fusion2_MinusFCLayer, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 3, input_dim)
def forward(self, input1, input2):
norm_input = self._norm_layer1(torch.cat([input1, input2, input1 -
input2], dim=-1))
return norm_input
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp9, tmp10, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](primals_1, primals_2, buf0, 768,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 12),
(12, 1), 0), reinterpret_tensor(primals_3, (12, 4), (1, 12), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 12), (12, 1), 0)
class Fusion2_MinusFCLayerNew(nn.Module):
def __init__(self, input_dim):
super(Fusion2_MinusFCLayerNew, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 3, input_dim)
def forward(self, input_0, input_1):
primals_3 = self._norm_layer1.weight
primals_4 = self._norm_layer1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
RUCAIBox/WSDM2022-C2CRS
|
Fusion2_MinusFCLayer
| false
| 17,842
|
[
"MIT"
] | 4
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
https://github.com/RUCAIBox/WSDM2022-C2CRS/tree/8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
BertLinear
|
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
refer to: https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/modeling_bert.py
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
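# Illustrative sketch; the function name is hypothetical and not part of the
# original record. It spells out the OpenAI GPT-style tanh approximation quoted in
# the gelu() docstring above, for comparison with the exact erf-based version.
def _gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 *
        torch.pow(x, 3))))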
class BertLayerNorm(nn.Module):
"""This class is LayerNorm model for Bert
"""
def __init__(self, hidden_size, eps=1e-12):
"""This function sets `BertLayerNorm` parameters
Arguments:
hidden_size {int} -- input size
Keyword Arguments:
eps {float} -- epsilon (default: {1e-12})
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
"""This function propagates forwardly
Arguments:
x {tensor} -- input tesor
Returns:
tensor -- LayerNorm outputs
"""
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertLinear(nn.Module):
"""This class is Linear model for Bert
"""
def __init__(self, input_size, output_size, activation=gelu, dropout=0.0):
"""This function sets `BertLinear` model parameters
Arguments:
input_size {int} -- input size
output_size {int} -- output size
Keyword Arguments:
activation {function} -- activation function (default: {gelu})
dropout {float} -- dropout rate (default: {0.0})
"""
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.linear = nn.Linear(input_size, output_size)
self.linear.weight.data.normal_(mean=0.0, std=0.02)
self.linear.bias.data.zero_()
self.activation = activation
self.layer_norm = BertLayerNorm(self.output_size)
if dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = lambda x: x
def get_input_dims(self):
return self.input_size
def get_output_dims(self):
return self.output_size
def forward(self, x):
"""This function propagates forwardly
Arguments:
x {tensor} -- input tensor
Returns:
tenor -- Linear outputs
"""
output = self.activation(self.linear(x))
return self.dropout(self.layer_norm(output))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
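# Illustrative usage sketch; the helper name and the dropout value are assumptions,
# not part of the original record. BertLinear applies linear -> gelu ->
# BertLayerNorm, with dropout only when dropout > 0; here input_size equals
# output_size, so the tensor shape is preserved.
def _example_bertlinear_usage():
    layer = BertLinear(input_size=4, output_size=4, dropout=0.1)
    x = torch.rand(4, 4, 4, 4)
    y = layer(x)
    assert y.shape == x.shape
    return y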
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = 1e-12
tmp14 = tmp12 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp11 / tmp15
tmp17 = tmp0 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
refer to: https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/modeling_bert.py
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
"""This class is LayerNorm model for Bert
"""
def __init__(self, hidden_size, eps=1e-12):
"""This function sets `BertLayerNorm` parameters
Arguments:
hidden_size {int} -- input size
Keyword Arguments:
eps {float} -- epsilon (default: {1e-12})
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
"""This function propagates forwardly
Arguments:
x {tensor} -- input tesor
Returns:
tensor -- LayerNorm outputs
"""
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertLinearNew(nn.Module):
"""This class is Linear model for Bert
"""
def __init__(self, input_size, output_size, activation=gelu, dropout=0.0):
"""This function sets `BertLinear` model parameters
Arguments:
input_size {int} -- input size
output_size {int} -- output size
Keyword Arguments:
activation {function} -- activation function (default: {gelu})
dropout {float} -- dropout rate (default: {0.0})
"""
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.linear = nn.Linear(input_size, output_size)
self.linear.weight.data.normal_(mean=0.0, std=0.02)
self.linear.bias.data.zero_()
self.activation = activation
self.layer_norm = BertLayerNorm(self.output_size)
if dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = lambda x: x
def get_input_dims(self):
return self.input_size
def get_output_dims(self):
return self.output_size
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.layer_norm.weight
primals_5 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Receiling/ENPAR
|
BertLinear
| false
| 17,843
|
[
"MIT"
] | 5
|
decd2945d21a7be5a0f73c37cfc5e252301aab15
|
https://github.com/Receiling/ENPAR/tree/decd2945d21a7be5a0f73c37cfc5e252301aab15
|
Fusion2_FCLayer
|
import torch
from torch import nn
class Fusion2_FCLayer(nn.Module):
def __init__(self, input_dim):
super(Fusion2_FCLayer, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
def forward(self, input1, input2):
norm_input = self._norm_layer1(torch.cat([input1, input2], dim=-1))
return norm_input
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0)
class Fusion2_FCLayerNew(nn.Module):
def __init__(self, input_dim):
super(Fusion2_FCLayerNew, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
def forward(self, input_0, input_1):
primals_3 = self._norm_layer1.weight
primals_4 = self._norm_layer1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
RUCAIBox/WSDM2022-C2CRS
|
Fusion2_FCLayer
| false
| 17,844
|
[
"MIT"
] | 4
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
https://github.com/RUCAIBox/WSDM2022-C2CRS/tree/8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
Fusion3_FCLayer
|
import torch
from torch import nn
class Fusion3_FCLayer(nn.Module):
def __init__(self, input_dim):
super(Fusion3_FCLayer, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 3, input_dim)
def forward(self, input1, input2, input3):
norm_input = self._norm_layer1(torch.cat([input1, input2, input3],
dim=-1))
return norm_input
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](primals_1, primals_2, primals_3,
buf0, 768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 12),
(12, 1), 0), reinterpret_tensor(primals_4, (12, 4), (1, 12), 0),
alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 12), (12, 1), 0)
class Fusion3_FCLayerNew(nn.Module):
def __init__(self, input_dim):
super(Fusion3_FCLayerNew, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 3, input_dim)
def forward(self, input_0, input_1, input_2):
primals_4 = self._norm_layer1.weight
primals_5 = self._norm_layer1.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
RUCAIBox/WSDM2022-C2CRS
|
Fusion3_FCLayer
| false
| 17,845
|
[
"MIT"
] | 4
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
https://github.com/RUCAIBox/WSDM2022-C2CRS/tree/8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
DenseAtt
|
import torch
import torch.nn as nn
import torch.optim
import torch.nn.modules.loss
class DenseAtt(nn.Module):
def __init__(self, in_features, dropout):
super(DenseAtt, self).__init__()
self.dropout = dropout
self.linear = nn.Linear(2 * in_features, 1, bias=True)
self.in_features = in_features
def forward(self, x, adj):
n = x.size(0)
x_left = torch.unsqueeze(x, 1)
x_left = x_left.expand(-1, n, -1)
x_right = torch.unsqueeze(x, 0)
x_right = x_right.expand(n, -1, -1)
x_cat = torch.cat((x_left, x_right), dim=2)
att_adj = self.linear(x_cat).squeeze()
att_adj = torch.sigmoid(att_adj)
att_adj = torch.mul(adj.to_dense(), att_adj)
return att_adj
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
import torch.nn.modules.loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = xindex // 32
x1 = xindex // 8 % 4
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 8), (8, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_2, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](primals_4, buf2, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2
class DenseAttNew(nn.Module):
def __init__(self, in_features, dropout):
super(DenseAttNew, self).__init__()
self.dropout = dropout
self.linear = nn.Linear(2 * in_features, 1, bias=True)
self.in_features = in_features
def forward(self, input_0, input_1):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
RingBDStack/ACE-HGNN
|
DenseAtt
| false
| 17,846
|
[
"MIT"
] | 5
|
afc610dd838951dcd6c3910795b472566f0c23ca
|
https://github.com/RingBDStack/ACE-HGNN/tree/afc610dd838951dcd6c3910795b472566f0c23ca
|
SelfAttentionBatch
|
import torch
from torch import nn
import torch.nn.functional as F
class SelfAttentionBatch(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionBatch, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)),
requires_grad=True)
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad
=True)
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, h):
e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
dim=1)
attention = F.softmax(e, dim=0)
return torch.matmul(attention, h)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'da': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0),
primals_2, out=buf6)
del buf5
return reinterpret_tensor(buf6, (4,), (1,), 0
), buf1, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)
class SelfAttentionBatchNew(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionBatchNew, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)),
requires_grad=True)
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad
=True)
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
RUCAIBox/WSDM2022-C2CRS
|
SelfAttentionBatch
| false
| 17,847
|
[
"MIT"
] | 4
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
https://github.com/RUCAIBox/WSDM2022-C2CRS/tree/8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
SelfAttentionPooling
|
import torch
import torch.nn as nn
class SelfAttentionPooling(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPooling, self).__init__()
self.W = nn.Linear(input_dim, 1)
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (N, T, 1)
return:
utter_rep: size (N, H)
"""
softmax = nn.functional.softmax
att_logits = self.W(batch_rep).squeeze(-1)
att_logits = att_mask + att_logits
att_w = softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
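# Illustrative usage sketch; the helper name and shapes are assumptions, not part
# of the original record. It follows the shapes from the forward() docstring: a
# (N, T, H) batch of frame representations plus an (N, T) additive mask is pooled
# into a single (N, H) utterance vector.
def _example_selfattentionpooling_usage():
    pool = SelfAttentionPooling(input_dim=8)
    batch_rep = torch.rand(2, 5, 8)  # N=2, T=5, H=8
    att_mask = torch.zeros(2, 5)     # additive mask; zeros leave the logits unchanged
    utter_rep = pool(batch_rep, att_mask)
    assert utter_rep.shape == (2, 8)
    return utter_rep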
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 64
x3 = xindex // 64
x5 = xindex // 4 % 16
x2 = xindex // 16 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp8
tmp13 = tmp11 + tmp12
tmp15 = tmp13 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp18 = tmp16 / tmp17
tmp19 = tmp10 * tmp18
tmp20 = tmp9 + tmp19
tmp24 = tmp22 + tmp23
tmp26 = tmp24 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp29 = tmp27 / tmp28
tmp30 = tmp21 * tmp29
tmp31 = tmp20 + tmp30
tmp35 = tmp33 + tmp34
tmp37 = tmp35 - tmp36
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = tmp32 * tmp40
tmp42 = tmp31 + tmp41
tl.store(out_ptr0 + x7, tmp42, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_0[grid(64)](primals_4, buf1, buf2,
buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](primals_3, primals_4, buf1,
buf2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del buf3
return buf4, primals_3, primals_4, buf1
class SelfAttentionPoolingNew(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPoolingNew, self).__init__()
self.W = nn.Linear(input_dim, 1)
def forward(self, input_0, input_1):
primals_1 = self.W.weight
primals_2 = self.W.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
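# --- hedged usage sketch (added for illustration; not part of the original repo,
# and the helper name below is made up). It only smoke-tests the inductor
# wrapper above with tensors matching the shapes asserted in `call`; a CUDA
# device is required because `call` allocates its buffers on device 0.
def _self_attention_pooling_smoke_test():
    if not torch.cuda.is_available():
        return None
    m = SelfAttentionPoolingNew(input_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    out = m(x, y)
    assert out.shape == (4, 4, 4, 4)
    return out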
|
RayTzeng/s3m-membership-inference
|
SelfAttentionPooling
| false
| 17,848
|
[
"MIT"
] | 9
|
ec1ed9438afc4fd3d7a55fd10e6065d2ecc861c4
|
https://github.com/RayTzeng/s3m-membership-inference/tree/ec1ed9438afc4fd3d7a55fd10e6065d2ecc861c4
|
Fusion3_MinusFCLayer
|
import torch
from torch import nn
class Fusion3_MinusFCLayer(nn.Module):
def __init__(self, input_dim):
super(Fusion3_MinusFCLayer, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 6, input_dim)
def forward(self, input1, input2, input3):
norm_input = self._norm_layer1(torch.cat([input1, input2, input3,
input1 - input2, input1 - input3, input2 - input3], dim=-1))
return norm_input
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
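# --- hedged example (added for illustration; not in the original repo, helper
# name chosen here). It shows the shape bookkeeping of the fusion: three
# (..., D) inputs are concatenated with their pairwise differences into a
# (..., 6*D) tensor and projected back to D by the linear layer.
def _fusion3_shape_demo():
    layer = Fusion3_MinusFCLayer(input_dim=4)
    a, b, c = get_inputs()
    out = layer(a, b, c)
    assert out.shape == (4, 4, 4, 4)
    return out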
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 24
x1 = xindex // 24
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 - tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp19, tmp22, tmp23)
tmp25 = tmp0 >= tmp17
tmp26 = tl.full([1], 20, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tl.load(in_ptr0 + (4 * x1 + (-16 + x0)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr2 + (4 * x1 + (-16 + x0)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tmp29 - tmp30
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp28, tmp31, tmp32)
tmp34 = tmp0 >= tmp26
tl.full([1], 24, tl.int64)
tmp37 = tl.load(in_ptr1 + (4 * x1 + (-20 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (4 * x1 + (-20 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 - tmp38
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp34, tmp39, tmp40)
tmp42 = tl.where(tmp28, tmp33, tmp41)
tmp43 = tl.where(tmp19, tmp24, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + x2, tmp46, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 24), (24, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1536)](primals_1, primals_2, primals_3,
buf0, 1536, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 24),
(24, 1), 0), reinterpret_tensor(primals_4, (24, 4), (1, 24), 0),
alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 24), (24, 1), 0)
class Fusion3_MinusFCLayerNew(nn.Module):
def __init__(self, input_dim):
super(Fusion3_MinusFCLayerNew, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 6, input_dim)
def forward(self, input_0, input_1, input_2):
primals_4 = self._norm_layer1.weight
primals_5 = self._norm_layer1.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
RUCAIBox/WSDM2022-C2CRS
|
Fusion3_MinusFCLayer
| false
| 17,850
|
[
"MIT"
] | 4
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
https://github.com/RUCAIBox/WSDM2022-C2CRS/tree/8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
BinaryNLLEntropy
|
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.nn.modules.loss import _Loss
import torch.jit
class BinaryNLLEntropy(_Loss):
def __init__(self, size_average=True):
super(BinaryNLLEntropy, self).__init__()
self.size_average = size_average
def forward(self, net_output, label_output):
"""
:param net_output: batch_size x
:param labels:
:return:
"""
batch_size = net_output.size(0)
loss = F.binary_cross_entropy_with_logits(net_output, label_output,
size_average=self.size_average)
if self.size_average is False:
loss /= batch_size
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
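# --- hedged sanity check (added here; not part of the original repo, helper
# name is made up). With size_average=True the criterion is mean BCE-with-logits,
# which should match the numerically stable closed form
#   mean(max(x, 0) - x * y + log(1 + exp(-|x|))).
def _binary_nll_entropy_demo():
    crit = BinaryNLLEntropy(size_average=True)
    logits, targets = get_inputs()
    loss = crit(logits, targets)
    manual = (logits.clamp(min=0) - logits * targets + torch.log1p(torch.exp(
        -logits.abs()))).mean()
    assert torch.allclose(loss, manual, atol=1e-6)
    return loss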
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.checkpoint
from torch.nn.modules.loss import _Loss
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BinaryNLLEntropyNew(_Loss):
def __init__(self, size_average=True):
super(BinaryNLLEntropyNew, self).__init__()
self.size_average = size_average
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
RoderickGu/Pretraining_GPT
|
BinaryNLLEntropy
| false
| 17,851
|
[
"Apache-2.0"
] | 4
|
0a3ecd38116dc271e273f57490b9b45b660bf401
|
https://github.com/RoderickGu/Pretraining_GPT/tree/0a3ecd38116dc271e273f57490b9b45b660bf401
|
GAT
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.W = Parameter(torch.empty(size=(in_features, out_features)))
self.a = Parameter(torch.empty(size=(2 * out_features, 1)))
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.init_weight()
def init_weight(self):
nn.init.xavier_uniform_(self.W, gain=1.414)
nn.init.xavier_uniform_(self.a, gain=1.414)
def _prepare_attentional_mechanism_input(self, Wh):
N = Wh.shape[0]
Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)
Wh_repeated_alternating = Wh.repeat(N, 1)
all_combinations_matrix = torch.cat([Wh_repeated_in_chunks,
Wh_repeated_alternating], dim=1)
return all_combinations_matrix.view(N, N, 2 * self.out_features
), Wh_repeated_in_chunks, Wh_repeated_alternating
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
a_input, _Wh_repeated_in_chunks, _Wh_repeated_alternating = (self.
_prepare_attentional_mechanism_input(Wh))
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
return h_prime
class GAT(nn.Module):
def __init__(self, node_feat, node_hid, dropout, alpha, nheads, concat=
False):
"""Dense/multi-head version of GAT."""
super(GAT, self).__init__()
self.dropout = dropout
self.concat = concat
self.attentions = [GraphAttentionLayer(node_feat, node_hid, dropout
=dropout, alpha=alpha) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
if self.concat:
y = torch.cat([att(x, adj) for att in self.attentions], dim=1)
else:
y = torch.mean(torch.stack([att(x, adj) for att in self.
attentions]), dim=0)
y = F.dropout(y, self.dropout, training=self.training)
y = F.elu(y)
return y
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'node_feat': 4, 'node_hid': 4, 'dropout': 0.5, 'alpha': 4,
'nheads': 4}]
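# --- hedged usage sketch (added for illustration; not from the original repo,
# and the helper name, seed, and alpha=0.2 below are choices made here). It
# runs the dense multi-head GAT on a tiny fully connected graph; with dropout
# set to 0 and eval() the averaged-head output is deterministic for the seed.
def _gat_demo():
    torch.manual_seed(0)
    gat = GAT(node_feat=4, node_hid=4, dropout=0.0, alpha=0.2, nheads=2,
        concat=False)
    gat.eval()
    x = torch.rand(4, 4)
    adj = torch.ones(4, 4)
    out = gat(x, adj)
    assert out.shape == (4, 4)
    return out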
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp40 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp41 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp51 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp52 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp58 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp74 = tl.load(in_ptr5 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp75 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp80 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp85 = tl.load(in_ptr5 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp91 = tl.load(in_ptr5 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp92 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp108 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp109 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp114 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp119 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp120 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp125 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp126 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
tl.store(out_ptr2 + x0, tmp62, xmask)
tl.store(out_ptr3 + x0, tmp73, xmask)
tl.store(out_ptr4 + x0, tmp96, xmask)
tl.store(out_ptr5 + x0, tmp107, xmask)
tl.store(out_ptr6 + x0, tmp130, xmask)
tl.store(out_ptr7 + x0, tmp141, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x2, xmask).to(tl.int1)
tmp14 = tl.load(in_out_ptr1 + x2, xmask)
tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr7 + x2, xmask).to(tl.int1)
tmp24 = tl.load(in_out_ptr2 + x2, xmask)
tmp28 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr10 + x2, xmask).to(tl.int1)
tmp34 = tl.load(in_out_ptr3 + x2, xmask)
tmp38 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(in_out_ptr0 + x2, tmp12, xmask)
tl.store(in_out_ptr1 + x2, tmp22, xmask)
tl.store(in_out_ptr2 + x2, tmp32, xmask)
tl.store(in_out_ptr3 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused_elu_mean_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 0.0
tmp10 = tmp8 > tmp9
tmp11 = 1.0
tmp12 = tmp8 * tmp11
tmp13 = libdevice.expm1(tmp12)
tmp14 = tmp13 * tmp11
tmp15 = tl.where(tmp10, tmp12, tmp14)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](primals_4, buf4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf9, buf10, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf10, primals_6, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf11, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf17, buf18, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf18, primals_8, out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf19, buf20, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf25, buf26, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf26, primals_10, out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf27, buf28, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf30 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_2[grid(4)](buf4,
buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5,
buf6, buf13, buf14, buf21, buf22, buf29, buf30, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
buf15 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0)
del buf11
buf23 = reinterpret_tensor(buf19, (4, 4), (4, 1), 0)
del buf19
buf31 = reinterpret_tensor(buf27, (4, 4), (4, 1), 0)
del buf27
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf7,
buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13,
buf14, buf20, buf21, buf22, buf28, buf29, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del buf21
del buf22
del buf29
del buf30
del buf5
del buf6
buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf8 = reinterpret_tensor(buf33, (4, 4), (4, 1), 0)
extern_kernels.mm(buf7, buf0, out=buf8)
buf16 = reinterpret_tensor(buf33, (4, 4), (4, 1), 16)
extern_kernels.mm(buf15, buf9, out=buf16)
buf24 = reinterpret_tensor(buf33, (4, 4), (4, 1), 32)
extern_kernels.mm(buf23, buf17, out=buf24)
buf32 = reinterpret_tensor(buf33, (4, 4), (4, 1), 48)
extern_kernels.mm(buf31, buf25, out=buf32)
buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_elu_mean_4[grid(16)](buf33, buf34, buf35, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf16
del buf24
del buf32
del buf33
del buf8
return (buf35, buf3, buf4, buf7, buf12, buf15, buf20, buf23, buf28,
buf31, buf34, reinterpret_tensor(buf25, (4, 4), (1, 4), 0),
reinterpret_tensor(buf26, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4
), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0),
reinterpret_tensor(buf18, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1,
4), 0), reinterpret_tensor(buf10, (8, 16), (1, 8), 0),
reinterpret_tensor(primals_6, (1, 8), (1, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(
buf1, (8, 16), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1,
1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.W = Parameter(torch.empty(size=(in_features, out_features)))
self.a = Parameter(torch.empty(size=(2 * out_features, 1)))
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.init_weight()
def init_weight(self):
nn.init.xavier_uniform_(self.W, gain=1.414)
nn.init.xavier_uniform_(self.a, gain=1.414)
def _prepare_attentional_mechanism_input(self, Wh):
N = Wh.shape[0]
Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)
Wh_repeated_alternating = Wh.repeat(N, 1)
all_combinations_matrix = torch.cat([Wh_repeated_in_chunks,
Wh_repeated_alternating], dim=1)
return all_combinations_matrix.view(N, N, 2 * self.out_features
), Wh_repeated_in_chunks, Wh_repeated_alternating
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
a_input, _Wh_repeated_in_chunks, _Wh_repeated_alternating = (self.
_prepare_attentional_mechanism_input(Wh))
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
return h_prime
class GATNew(nn.Module):
def __init__(self, node_feat, node_hid, dropout, alpha, nheads, concat=
False):
"""Dense/multi-head version of GAT."""
super(GATNew, self).__init__()
self.dropout = dropout
self.concat = concat
self.attentions = [GraphAttentionLayer(node_feat, node_hid, dropout
=dropout, alpha=alpha) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, input_0, input_1):
primals_1 = self.attention_0.W
primals_3 = self.attention_0.a
primals_2 = self.attention_1.W
primals_6 = self.attention_1.a
primals_4 = self.attention_2.W
primals_8 = self.attention_2.a
primals_5 = self.attention_3.W
primals_10 = self.attention_3.a
primals_7 = input_0
primals_9 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
RidongHan/GHE-LPC
|
GAT
| false
| 17,852
|
[
"MIT"
] | 4
|
2a10f423d747aa28560a3bcbf29f7ec87422beb8
|
https://github.com/RidongHan/GHE-LPC/tree/2a10f423d747aa28560a3bcbf29f7ec87422beb8
|
NormKLLoss
|
import torch
import torch.utils.checkpoint
import torch as th
from torch.nn.modules.loss import _Loss
import torch.jit
class NormKLLoss(_Loss):
def __init__(self, unit_average=False):
super(NormKLLoss, self).__init__()
self.unit_average = unit_average
def forward(self, recog_mu, recog_logvar, prior_mu, prior_logvar):
loss = 1.0 + (recog_logvar - prior_logvar)
loss -= th.div(th.pow(prior_mu - recog_mu, 2), th.exp(prior_logvar))
loss -= th.div(th.exp(recog_logvar), th.exp(prior_logvar))
if self.unit_average:
kl_loss = -0.5 * th.mean(loss, dim=1)
else:
kl_loss = -0.5 * th.sum(loss, dim=1)
avg_kl_loss = th.mean(kl_loss)
return avg_kl_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
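# --- hedged sanity check (added for illustration; not part of the original
# repo, helper name chosen here). For 2-D inputs the loss should equal the
# analytic KL between the diagonal Gaussians N(recog_mu, exp(recog_logvar))
# and N(prior_mu, exp(prior_logvar)), summed over dims and averaged over the
# batch, which torch.distributions can verify directly.
def _norm_kl_demo():
    import torch.distributions as D
    crit = NormKLLoss(unit_average=False)
    rm, rlv, pm, plv = (torch.randn(8, 16) for _ in range(4))
    loss = crit(rm, rlv, pm, plv)
    q = D.Normal(rm, (0.5 * rlv).exp())
    p = D.Normal(pm, (0.5 * plv).exp())
    ref = D.kl_divergence(q, p).sum(dim=1).mean()
    assert torch.allclose(loss, ref, atol=1e-4)
    return loss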
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.checkpoint
from torch.nn.modules.loss import _Loss
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_mean_mul_pow_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp6 = tl.load(in_ptr3 + (r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr3 + (16 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp31 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp34 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp35 = tl.load(in_ptr3 + (32 + r0 + 64 * r1), None)
tmp45 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp46 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp49 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp50 = tl.load(in_ptr3 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl_math.exp(tmp1)
tmp10 = tmp8 / tmp9
tmp11 = tmp4 - tmp10
tmp12 = tl_math.exp(tmp0)
tmp13 = tmp12 / tmp9
tmp14 = tmp11 - tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp17 + tmp3
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tl_math.exp(tmp16)
tmp24 = tmp22 / tmp23
tmp25 = tmp18 - tmp24
tmp26 = tl_math.exp(tmp15)
tmp27 = tmp26 / tmp23
tmp28 = tmp25 - tmp27
tmp29 = tmp14 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 + tmp3
tmp36 = tmp34 - tmp35
tmp37 = tmp36 * tmp36
tmp38 = tl_math.exp(tmp31)
tmp39 = tmp37 / tmp38
tmp40 = tmp33 - tmp39
tmp41 = tl_math.exp(tmp30)
tmp42 = tmp41 / tmp38
tmp43 = tmp40 - tmp42
tmp44 = tmp29 + tmp43
tmp47 = tmp45 - tmp46
tmp48 = tmp47 + tmp3
tmp51 = tmp49 - tmp50
tmp52 = tmp51 * tmp51
tmp53 = tl_math.exp(tmp46)
tmp54 = tmp52 / tmp53
tmp55 = tmp48 - tmp54
tmp56 = tl_math.exp(tmp45)
tmp57 = tmp56 / tmp53
tmp58 = tmp55 - tmp57
tmp59 = tmp44 + tmp58
tmp60 = -0.5
tmp61 = tmp59 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = tl.sum(tmp62, 1)[:, None]
tmp65 = 64.0
tmp66 = tmp64 / tmp65
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp66, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_div_exp_mean_mul_pow_sub_sum_0[grid(1)](buf2,
arg0_1, arg1_1, arg2_1, arg3_1, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf2,
class NormKLLossNew(_Loss):
def __init__(self, unit_average=False):
super(NormKLLossNew, self).__init__()
self.unit_average = unit_average
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
RoderickGu/Pretraining_GPT
|
NormKLLoss
| false
| 17,853
|
[
"Apache-2.0"
] | 4
|
0a3ecd38116dc271e273f57490b9b45b660bf401
|
https://github.com/RoderickGu/Pretraining_GPT/tree/0a3ecd38116dc271e273f57490b9b45b660bf401
|
first_conv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class first_conv(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
super(first_conv, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
self.layer_type = 'FConv2d'
self.transform = None
    def forward(self, x):
        restore_w = self.weight
        max = restore_w.data.max()
        # fake-quantise the weights onto a 127-level grid scaled by their max
        weight_q = restore_w.div(max).mul(127).round().div(127).mul(max)
        # straight-through estimator: forward uses weight_q, gradients reach restore_w
        weight_q = (weight_q - restore_w).detach() + restore_w
        return F.conv2d(x, weight_q, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
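# --- hedged illustration (added here; not from the original repo, helper name
# made up). A 4x4 kernel on a 4x4 input collapses the spatial dims to 1x1, and
# the straight-through estimator lets the loss gradient reach the full-precision
# weights even though the forward pass uses the fake-quantised copy.
def _first_conv_quant_demo():
    torch.manual_seed(0)
    conv = first_conv(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    out = conv(x)
    assert out.shape == (4, 4, 1, 1)
    out.sum().backward()
    assert conv.weight.grad is not None
    return out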
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_max_mul_round_sub_0(in_ptr0, out_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tmp4 = tmp0 / tmp3
tmp5 = 127.0
tmp6 = tmp4 * tmp5
tmp7 = libdevice.nearbyint(tmp6)
tmp8 = 0.007874015748031496
tmp9 = tmp7 * tmp8
tmp10 = tmp9 * tmp3
tmp11 = tmp10 - tmp0
tmp12 = tmp11 + tmp0
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp12, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_max_mul_round_sub_0[grid(1)](primals_1,
buf1, 1, 256, num_warps=2, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_2, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
return buf2, primals_2, buf1
class first_convNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
super(first_convNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
self.layer_type = 'FConv2d'
self.transform = None
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
RuiLin0212/BATMANN
|
first_conv
| false
| 17,854
|
[
"MIT"
] | 6
|
5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
https://github.com/RuiLin0212/BATMANN/tree/5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
Hidden2Discrete
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
import torch.jit
class Hidden2Discrete(nn.Module):
def __init__(self, input_size, y_size, k_size, is_lstm=False, has_bias=True
):
super(Hidden2Discrete, self).__init__()
self.y_size = y_size
self.k_size = k_size
latent_size = self.k_size * self.y_size
if is_lstm:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.p_c = nn.Linear(input_size, latent_size, bias=has_bias)
else:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.is_lstm = is_lstm
def forward(self, inputs):
"""
:param inputs: batch_size x input_size
:return:
"""
if self.is_lstm:
h, c = inputs
if h.dim() == 3:
h = h.squeeze(0)
c = c.squeeze(0)
logits = self.p_h(h) + self.p_c(c)
else:
logits = self.p_h(inputs)
logits = logits.view(-1, self.k_size)
log_qy = F.log_softmax(logits, dim=1)
return logits, log_qy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'y_size': 4, 'k_size': 4}]
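# --- hedged usage sketch (added for illustration; not part of the original
# repo, helper name chosen here). The module maps hidden states to y_size
# blocks of k_size logits; log_qy is a log-softmax over each k_size block of
# the flattened view, so every row exponentiates to a proper distribution.
def _hidden2discrete_demo():
    torch.manual_seed(0)
    h2d = Hidden2Discrete(input_size=4, y_size=4, k_size=4)
    h = torch.rand(8, 4)
    logits, log_qy = h2d(h)
    assert logits.shape == (32, 4) and log_qy.shape == (32, 4)
    assert torch.allclose(log_qy.exp().sum(dim=1), torch.ones(32), atol=1e-5)
    return logits, log_qy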
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.checkpoint
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(1024)](buf0, buf1, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(1024)](buf1, buf2, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf0, (256, 4), (4, 1), 0
), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class Hidden2DiscreteNew(nn.Module):
def __init__(self, input_size, y_size, k_size, is_lstm=False, has_bias=True
):
super(Hidden2DiscreteNew, self).__init__()
self.y_size = y_size
self.k_size = k_size
latent_size = self.k_size * self.y_size
if is_lstm:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.p_c = nn.Linear(input_size, latent_size, bias=has_bias)
else:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.is_lstm = is_lstm
def forward(self, input_0):
primals_1 = self.p_h.weight
primals_2 = self.p_h.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
RoderickGu/Pretraining_GPT
|
Hidden2Discrete
| false
| 17,855
|
[
"Apache-2.0"
] | 4
|
0a3ecd38116dc271e273f57490b9b45b660bf401
|
https://github.com/RoderickGu/Pretraining_GPT/tree/0a3ecd38116dc271e273f57490b9b45b660bf401
|
Generator
|
import torch
import torch.distributed
import torch
import torch.nn as nn
def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1):
while True:
gumbels = -torch.empty_like(logits).exponential_().log()
gumbels = (logits + gumbels) / tau
if log_mode:
y_soft = gumbels.log_softmax(dim)
else:
y_soft = gumbels.softmax(dim)
        # resample until the (log-)softmax produced no NaN entries
        if torch.sum(torch.isnan(y_soft)).item() < 0.01:
            break
if hard:
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
ret = y_soft
return ret
class Generator(nn.Module):
def __init__(self, vocab_size, dec_hidden_size, pad_idx):
super(Generator, self).__init__()
self.linear = nn.Linear(dec_hidden_size, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
self.pad_idx = pad_idx
def forward(self, x, use_gumbel_softmax=False):
output = self.linear(x)
output[:, self.pad_idx] = -float('inf')
if use_gumbel_softmax:
output = gumbel_softmax(output, log_mode=True, dim=-1)
else:
output = self.softmax(output)
return output
def get_inputs():
return [torch.rand([4, 5, 4, 4])]
def get_init_inputs():
return [[], {'vocab_size': 4, 'dec_hidden_size': 4, 'pad_idx': 4}]
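# --- hedged sanity check (added here; not from the original repo, and the
# helper name and shapes are choices made for illustration). A 2-D input is
# used on purpose: `output[:, self.pad_idx]` masks along dim 1, which is the
# vocabulary axis only when the input is (batch, dec_hidden_size). In that
# case the returned log-probabilities normalise and the pad token gets zero mass.
def _generator_demo():
    torch.manual_seed(0)
    gen = Generator(vocab_size=8, dec_hidden_size=4, pad_idx=0)
    x = torch.rand(2, 4)
    log_probs = gen(x)
    probs = log_probs.exp()
    assert torch.allclose(probs.sum(dim=-1), torch.ones(2), atol=1e-5)
    assert torch.all(probs[:, 0] == 0)
    return log_probs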
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 5
x5 = xindex
x6 = xindex // 4
tmp3 = tl.load(in_ptr0 + x5, xmask)
tmp6 = tl.load(in_ptr0 + 4 * x6, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x6), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp0 = x2
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tl.where(tmp2, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tl.where(tmp2, tmp4, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tl.where(tmp2, tmp4, tmp14)
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp17 = tmp5 - tmp16
tl.store(out_ptr0 + x5, tmp17, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((80, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(320)](buf0, buf1, 320, XBLOCK=
128, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 5, 4, 4), (80, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(320)](buf1, buf2, 320, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), buf2
def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1):
while True:
gumbels = -torch.empty_like(logits).exponential_().log()
gumbels = (logits + gumbels) / tau
if log_mode:
y_soft = gumbels.log_softmax(dim)
else:
y_soft = gumbels.softmax(dim)
if torch.sum(torch.isnan(y_soft)).item() < 0.01:
break
if hard:
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
ret = y_soft
return ret
class GeneratorNew(nn.Module):
def __init__(self, vocab_size, dec_hidden_size, pad_idx):
super(GeneratorNew, self).__init__()
self.linear = nn.Linear(dec_hidden_size, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
self.pad_idx = pad_idx
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
RowitZou/CG-nAR
|
Generator
| false
| 17,856
|
[
"MIT"
] | 8
|
8e2debeb3170045592b3b674ea6f9b56251e71f4
|
https://github.com/RowitZou/CG-nAR/tree/8e2debeb3170045592b3b674ea6f9b56251e71f4
|
last_fc
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class last_fc(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(last_fc, self).__init__(in_features, out_features, bias)
self.layer_type = 'LFC'
self.transform = None
def forward(self, x):
restore_w = self.weight
max = restore_w.data.max()
weight_q = restore_w.div(max).mul(127).round().div(127).mul(max)
weight_q = (weight_q - restore_w).detach() + restore_w
return F.linear(x, weight_q, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
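# --- hedged illustration (added for clarity; not part of the original repo,
# helper name made up). The forward pass should match an explicit F.linear
# with the 127-level fake-quantised weights, confirming that only the weight
# path is altered by the quantisation.
def _last_fc_demo():
    torch.manual_seed(0)
    fc = last_fc(in_features=4, out_features=4)
    x = torch.rand(4, 4)
    w = fc.weight.data
    m = w.max()
    wq = w.div(m).mul(127).round().div(127).mul(m)
    expected = F.linear(x, wq, fc.bias)
    assert torch.allclose(fc(x), expected, atol=1e-6)
    return fc(x)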
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_max_mul_round_sub_0(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 / tmp3
tmp5 = 127.0
tmp6 = tmp4 * tmp5
tmp7 = libdevice.nearbyint(tmp6)
tmp8 = 0.007874015748031496
tmp9 = tmp7 * tmp8
tmp10 = tmp9 * tmp3
tmp11 = tmp10 - tmp0
tmp12 = tmp11 + tmp0
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_max_mul_round_sub_0[grid(1)](primals_1,
buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del buf1
del primals_2
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class last_fcNew(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(last_fcNew, self).__init__(in_features, out_features, bias)
self.layer_type = 'LFC'
self.transform = None
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
RuiLin0212/BATMANN
|
last_fc
| false
| 17,857
|
[
"MIT"
] | 6
|
5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
https://github.com/RuiLin0212/BATMANN/tree/5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
TransformerEncoderFeedForward
|
import torch
import torch.nn as nn
class Dense(nn.Module):
def __init__(self, in_dim, out_dim, use_bias=True, activation=None,
name=None):
super(Dense, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.use_bias = use_bias
self.activation = activation
self.name = name if name else 'dense'
self.fc = nn.Linear(self.in_dim, self.out_dim, bias=self.use_bias)
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc.weight)
if self.use_bias:
nn.init.zeros_(self.fc.bias)
def forward(self, inputs):
x = self.fc(inputs)
if self.activation:
x = self.activation(x)
return x
class TransformerEncoderFeedForward(nn.Module):
def __init__(self, in_dim, out_dim, drop_out_proba, expansion_rate,
name=None):
super(TransformerEncoderFeedForward, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.drop_out_proba = drop_out_proba
self.expansion_rate = expansion_rate
self.name = name if name else 'Transformer-Encoder__Feed-Forward'
self.hidden_dense = Dense(in_dim=self.in_dim, out_dim=self.out_dim *
self.expansion_rate, use_bias=True, activation=nn.ReLU(), name=
f'{self.name}__Hidden-Dense')
self.output_dense = Dense(in_dim=self.out_dim * self.expansion_rate,
out_dim=self.out_dim, use_bias=True, activation=None, name=
f'{self.name}__Out-Dense')
self.dropout = nn.Dropout(p=self.drop_out_proba)
self.norm = nn.LayerNorm(normalized_shape=self.out_dim)
def forward(self, inputs):
hidden_values = self.hidden_dense(inputs)
output = self.output_dense(hidden_values)
output = self.dropout(output)
return self.norm(inputs + output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'drop_out_proba': 0.5,
'expansion_rate': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
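# The kernel above fuses the hidden Dense bias add with its ReLU in place and, in the
# same pass, writes the boolean mask `relu(x) <= 0` to `out_ptr0`; that mask (buf6 in
# `call`) is kept so the backward pass can zero gradients where the ReLU was inactive
# without re-reading the activations.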
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
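# The two kernels above split the module's `self.norm(inputs + output)` into a
# statistics pass (per-row mean and biased variance of the residual sum) and a
# normalization pass (rsqrt(var + 1e-05), then the affine weight and bias). A compact
# eager-mode sketch of the same computation, assuming `x` and `y` are the residual
# input and the output-Dense result, with `weight` and `bias` of shape (4,):
def _reference_residual_layer_norm_sketch(x, y, weight, bias, eps=1e-05):
    s = x + y
    mean = s.mean(dim=-1, keepdim=True)
    var = s.var(dim=-1, unbiased=False, keepdim=True)
    return (s - mean) * torch.rsqrt(var + eps) * weight + bias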
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf2,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](primals_3, buf2,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 16), (
16, 1), 0), buf2, primals_4, buf6
class Dense(nn.Module):
def __init__(self, in_dim, out_dim, use_bias=True, activation=None,
name=None):
super(Dense, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.use_bias = use_bias
self.activation = activation
self.name = name if name else 'dense'
self.fc = nn.Linear(self.in_dim, self.out_dim, bias=self.use_bias)
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc.weight)
if self.use_bias:
nn.init.zeros_(self.fc.bias)
def forward(self, inputs):
x = self.fc(inputs)
if self.activation:
x = self.activation(x)
return x
class TransformerEncoderFeedForwardNew(nn.Module):
def __init__(self, in_dim, out_dim, drop_out_proba, expansion_rate,
name=None):
super(TransformerEncoderFeedForwardNew, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.drop_out_proba = drop_out_proba
self.expansion_rate = expansion_rate
self.name = name if name else 'Transformer-Encoder__Feed-Forward'
self.hidden_dense = Dense(in_dim=self.in_dim, out_dim=self.out_dim *
self.expansion_rate, use_bias=True, activation=nn.ReLU(), name=
f'{self.name}__Hidden-Dense')
self.output_dense = Dense(in_dim=self.out_dim * self.expansion_rate,
out_dim=self.out_dim, use_bias=True, activation=None, name=
f'{self.name}__Out-Dense')
self.dropout = nn.Dropout(p=self.drop_out_proba)
self.norm = nn.LayerNorm(normalized_shape=self.out_dim)
def forward(self, input_0):
primals_1 = self.hidden_dense.fc.weight
primals_2 = self.hidden_dense.fc.bias
primals_4 = self.output_dense.fc.weight
primals_5 = self.output_dense.fc.bias
primals_6 = self.norm.weight
primals_7 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| RoySadaka/lpd | TransformerEncoderFeedForward | false | 17,858 | ["MIT"] | 4 | 921454d9730d8228f4b0ca5349b0558ebd123c65 | https://github.com/RoySadaka/lpd/tree/921454d9730d8228f4b0ca5349b0558ebd123c65 |
MultiHeadAttention
|
import torch
import torch as th
import torch.nn as nn
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = th.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = th.softmax(x, dim=3)
attn_mat = x
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x, attn_mat
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'hidden_size': 4, 'attention_dropout_rate': 0.5,
'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
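# Note the `tmp3 = 1.0` multiplier above: it is the attention scale `att_size ** -0.5`
# folded in at compile time. With hidden_size=4 and num_heads=4 (the values from
# get_init_inputs), att_size is 1, so the scale happens to be exactly 1.0.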
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
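# Kernels 2 and 3 above implement the numerically stable softmax over the size-4
# attention dimension in two passes: subtract the row maximum and exponentiate, then
# normalize by the row sum. A minimal eager-mode sketch, assuming `scores` is the
# (4, 4, 4, 4) tensor produced by the preceding bmm:
def _reference_softmax_sketch(scores):
    e = torch.exp(scores - scores.amax(dim=-1, keepdim=True))
    return e / e.sum(dim=-1, keepdim=True)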
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_mul_0[grid(16, 4)](buf0, primals_3, buf3, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttentionNew, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_10 = self.output_layer.weight
primals_11 = self.output_layer.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
| Roestlab/massformer | MultiHeadAttention | false | 17,859 | ["BSD-2-Clause"] | 6 | c6324970c392f8ee96651679f49d21e430caa0c9 | https://github.com/Roestlab/massformer/tree/c6324970c392f8ee96651679f49d21e430caa0c9 |
SelfAttn
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
import torch as th
import torch.jit
class SelfAttn(nn.Module):
def __init__(self, hidden_size):
super(SelfAttn, self).__init__()
self.query = nn.Linear(hidden_size, 1)
def forward(self, keys, values, attn_mask=None):
"""
        :param keys: batch_size x time_len x hidden_size
        :param values: batch_size x time_len x hidden_size
:param attn_mask: batch_size x time_len
:return: summary state
"""
alpha = F.softmax(self.query(keys), dim=1)
if attn_mask is not None:
alpha = alpha * attn_mask.unsqueeze(2)
alpha = alpha / th.sum(alpha, dim=1, keepdim=True)
summary = th.sum(values * alpha, dim=1)
return summary
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.checkpoint
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
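# The kernel above fuses the module's `th.sum(values * alpha, dim=1)`: for every output
# element the four positions along dim=1 of `values` are weighted by the matching
# softmax weight and accumulated in registers, so the intermediate `values * alpha`
# tensor is never materialized. A minimal eager-mode sketch, assuming `values` has
# shape (4, 4, 4, 4) and `alpha` is the (4, 4, 4, 1) softmax output:
def _reference_weighted_sum_sketch(values, alpha):
    return (values * alpha).sum(dim=1)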
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_mul_sum_2[grid(64)](primals_4, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
return buf4, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1
class SelfAttnNew(nn.Module):
def __init__(self, hidden_size):
super(SelfAttnNew, self).__init__()
self.query = nn.Linear(hidden_size, 1)
def forward(self, input_0, input_1):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| RoderickGu/Pretraining_GPT | SelfAttn | false | 17,860 | ["Apache-2.0"] | 4 | 0a3ecd38116dc271e273f57490b9b45b660bf401 | https://github.com/RoderickGu/Pretraining_GPT/tree/0a3ecd38116dc271e273f57490b9b45b660bf401 |
AttentionBlock
|
import math
import torch
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
def convert_pad_shape(pad_shape):
"""
Used to get arguments for F.pad
"""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
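# Example: convert_pad_shape([[0, 0], [2, 3], [0, 1]]) returns [0, 1, 2, 3, 0, 0] --
# the per-dimension pads reversed and flattened into the last-dimension-first argument
# order that F.pad expects.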
class AttentionBlock(nn.Module):
def __init__(self, channels, out_channels, n_heads, window_size=None,
heads_share=True, p_dropout=0.0, block_length=None, proximal_bias=
False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
nn.init.xavier_uniform_(self.conv_v.weight)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
.k_channels)
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query,
key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(
rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores * block_mask + -10000.0 * (1 - block_mask)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(relative_embeddings,
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0,
length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length -
1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
)
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
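# Worked example of the proximal bias for length=4: the tensor added to the attention
# scores is -log1p(|i - j|), i.e. 0 on the diagonal and increasingly negative as the
# query and key positions move apart:
#   [[ 0.0000, -0.6931, -1.0986, -1.3863],
#    [-0.6931,  0.0000, -0.6931, -1.0986],
#    [-1.0986, -0.6931,  0.0000, -0.6931],
#    [-1.3863, -1.0986, -0.6931,  0.0000]]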
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf3, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf4 = buf1
del buf1
triton_poi_fused_convolution_0[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = buf2
del buf2
triton_poi_fused_convolution_0[grid(64)](buf8, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4,
4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_0[grid(64)](buf11, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf11, buf7, primals_1, primals_3, primals_4, primals_6,
primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16,
4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
def convert_pad_shape(pad_shape):
"""
Used to get arguments for F.pad
"""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class AttentionBlockNew(nn.Module):
def __init__(self, channels, out_channels, n_heads, window_size=None,
heads_share=True, p_dropout=0.0, block_length=None, proximal_bias=
False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
nn.init.xavier_uniform_(self.conv_v.weight)
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
.k_channels)
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query,
key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(
rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores * block_mask + -10000.0 * (1 - block_mask)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(relative_embeddings,
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0,
length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length -
1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
)
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def forward(self, input_0, input_1):
primals_1 = self.conv_q.weight
primals_2 = self.conv_q.bias
primals_4 = self.conv_k.weight
primals_5 = self.conv_k.bias
primals_7 = self.conv_v.weight
primals_8 = self.conv_v.bias
primals_9 = self.conv_o.weight
primals_10 = self.conv_o.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| Royeqiu/Nemo_ASR | AttentionBlock | false | 17,861 | ["Apache-2.0"] | 10 | 12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e | https://github.com/Royeqiu/Nemo_ASR/tree/12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e |
Classifier
|
import torch
import torch.distributed
import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, hidden_size):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask_cls):
h = self.linear1(x).squeeze(-1)
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
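# The kernel above fuses the module's `self.sigmoid(h) * mask_cls.float()`. The
# `x0 = xindex % 64` load shows the broadcast: the 64 squeezed logits in buf1 are
# reused across the leading dimension of the (4, 4, 4, 4) mask. A minimal eager-mode
# sketch, assuming `h` holds the squeezed linear outputs and `mask` the cls mask:
def _reference_masked_scores_sketch(h, mask):
    return torch.sigmoid(h) * mask.float()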
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf1, primals_4, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1
class ClassifierNew(nn.Module):
def __init__(self, hidden_size):
super(ClassifierNew, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| RowitZou/CG-nAR | Classifier | false | 17,862 | ["MIT"] | 8 | 8e2debeb3170045592b3b674ea6f9b56251e71f4 | https://github.com/RowitZou/CG-nAR/tree/8e2debeb3170045592b3b674ea6f9b56251e71f4 |
FCN8VGG16
|
import torch
import numpy as np
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
def conv3x3(in_planes, out_planes, stride=1, padding=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), stride=(
stride, stride), padding=(padding, padding))
def get_upsampling_weight(in_channels, out_channels, kernel_size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) /
factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
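# Worked example for kernel_size=4 (used by upscore2 and upscore_pool4 below): factor=2
# and center=1.5, so the 1-D bilinear profile is [0.25, 0.75, 0.75, 0.25]; each matched
# (in, out) channel pair receives its outer product (corner entries 0.0625, centre
# entries 0.5625), the usual bilinear-interpolation initializer for a stride-2
# transposed convolution.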
class FCN8VGG16(nn.Module):
def __init__(self, n_classes):
super().__init__()
self.n_classes = n_classes
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.relu = nn.ReLU(inplace=True)
self.conv1_1 = conv3x3(3, 64, stride=1, padding=100)
self.conv1_2 = conv3x3(64, 64)
self.conv2_1 = conv3x3(64, 128)
self.conv2_2 = conv3x3(128, 128)
self.conv3_1 = conv3x3(128, 256)
self.conv3_2 = conv3x3(256, 256)
self.conv3_3 = conv3x3(256, 256)
self.conv4_1 = conv3x3(256, 512)
self.conv4_2 = conv3x3(512, 512)
self.conv4_3 = conv3x3(512, 512)
self.conv5_1 = conv3x3(512, 512)
self.conv5_2 = conv3x3(512, 512)
self.conv5_3 = conv3x3(512, 512)
self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0)
self.dropout_f6 = nn.Dropout()
self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0)
self.dropout_f7 = nn.Dropout()
self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1,
stride=1, padding=0)
self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=4, stride=2, bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self.
n_classes, kernel_size=4, stride=2, bias=False)
self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=16, stride=8, bias=False)
self.scoring_layer.weight.data.zero_()
self.scoring_layer.bias.data.zero_()
self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1)
self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1)
self.score_pool3.weight.data.zero_()
self.score_pool3.bias.data.zero_()
self.score_pool4.weight.data.zero_()
self.score_pool4.bias.data.zero_()
self.upscore2.weight.data.copy_(get_upsampling_weight(self.
n_classes, self.n_classes, 4))
self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self.
n_classes, self.n_classes, 4))
self.upscore8.weight.data.copy_(get_upsampling_weight(self.
n_classes, self.n_classes, 16))
pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth'
state_dict = model_zoo.load_url(pth_url)
layer_names = [layer_name for layer_name in state_dict]
counter = 0
for p in self.parameters():
if counter < 26:
p.data = state_dict[layer_names[counter]]
elif counter == 26:
p.data = state_dict[layer_names[counter]].view(4096, 512, 7, 7)
elif counter == 27:
p.data = state_dict[layer_names[counter]]
elif counter == 28:
p.data = state_dict[layer_names[counter]].view(4096, 4096, 1, 1
)
elif counter == 29:
p.data = state_dict[layer_names[counter]]
counter += 1
def forward(self, x):
_n, _c, h, w = x.size()
conv1_1 = self.relu(self.conv1_1(x))
conv1_2 = self.relu(self.conv1_2(conv1_1))
pool1 = self.pool(conv1_2)
conv2_1 = self.relu(self.conv2_1(pool1))
conv2_2 = self.relu(self.conv2_2(conv2_1))
pool2 = self.pool(conv2_2)
conv3_1 = self.relu(self.conv3_1(pool2))
conv3_2 = self.relu(self.conv3_2(conv3_1))
conv3_3 = self.relu(self.conv3_3(conv3_2))
pool3 = self.pool(conv3_3)
conv4_1 = self.relu(self.conv4_1(pool3))
conv4_2 = self.relu(self.conv4_2(conv4_1))
conv4_3 = self.relu(self.conv4_3(conv4_2))
pool4 = self.pool(conv4_3)
conv5_1 = self.relu(self.conv5_1(pool4))
conv5_2 = self.relu(self.conv5_2(conv5_1))
conv5_3 = self.relu(self.conv5_3(conv5_2))
pool5 = self.pool(conv5_3)
fc6 = self.dropout_f6(self.relu(self.fc6(pool5)))
fc7 = self.dropout_f7(self.relu(self.fc7(fc6)))
scores = self.scoring_layer(fc7)
upscore2 = self.upscore2(scores)
score_pool4 = self.score_pool4(pool4)
score_pool4c = score_pool4[:, :, 5:5 + upscore2.size(2), 5:5 +
upscore2.size(3)]
upscore_pool4 = self.upscore_pool4(score_pool4c + upscore2)
score_pool3 = self.score_pool3(pool3)
score_pool3c = score_pool3[:, :, 9:9 + upscore_pool4.size(2), 9:9 +
upscore_pool4.size(3)]
output = self.upscore8(score_pool3c + upscore_pool4)
return output[:, :, 31:31 + h, 31:31 + w].contiguous()
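# The fixed crops above (offsets 5, 9, and the final 31) compensate for the 100-pixel
# padding added in conv1_1: each upsampled score map is slightly larger than the pool
# feature it is fused with, and the last slice re-centres the prediction on the
# original h x w input, following the reference FCN-8s recipe.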
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'n_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 49
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 1024 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 17572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 131
x2 = xindex // 8384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8448 % 66
x1 = xindex // 128 % 66
x0 = xindex % 128
x3 = xindex // 557568
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 131, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 33
x2 = xindex // 8448
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 17
x1 = xindex // 512 % 17
x0 = xindex % 512
x3 = xindex // 147968
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4608 % 9
x1 = xindex // 512 % 9
x0 = xindex % 512
x3 = xindex // 41472
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_24(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 32 % 8
x3 = xindex // 256
x4 = xindex % 32
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (360 + x4 + 68 * x2 + 1156 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x5, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 72 % 18
x3 = xindex // 1296
x4 = xindex % 72
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1224 + x4 + 132 * x2 + 4356 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x5, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex % 64
x3 = xindex // 64
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (18972 + y0 + 4 * x2 + 608 * x3 + 92416 * y1),
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + 4096 * y4), tmp0, ymask)
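# The call() graph below is the Inductor-compiled forward pass of the FCN-8s
# model defined at the end of this block: the fixed-size kernels above repack
# the weights and input into channels-last layouts, the VGG16 convolution and
# pooling stack is run (note the padding=100 first convolution), the 1x1
# scoring layer maps to n_classes=4 channels, and the pool4/pool3 skip
# connections are added onto the two stride-2 transposed convolutions before
# the final stride-8 upsampling and center crop back to the 64x64 input size.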
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
assert_size_stride(primals_29, (4096,), (1,))
assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_31, (4096,), (1,))
assert_size_stride(primals_32, (4, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_33, (4,), (1,))
assert_size_stride(primals_34, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_35, (4, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_36, (4,), (1,))
assert_size_stride(primals_37, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_38, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_39, (4,), (1,))
assert_size_stride(primals_40, (4, 4, 16, 16), (1024, 256, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
triton_poi_fused_1[grid(192, 9)](primals_2, buf1, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512),
torch.float32)
triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152,
49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1)
del primals_28
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_10[grid(16, 16)](primals_34, buf15, 16, 16, XBLOCK
=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_34
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_10[grid(16, 16)](primals_37, buf16, 16, 16, XBLOCK
=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_37
buf17 = empty_strided_cuda((4, 4, 16, 16), (1024, 1, 64, 4), torch.
float32)
triton_poi_fused_11[grid(16, 256)](primals_40, buf17, 16, 256,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_40
buf18 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(100, 100), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_12[grid(17572864)](buf19,
primals_3, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_12[grid(17572864)](buf21,
primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.float32)
buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_13[grid(4393216)](buf21,
buf22, buf23, 4393216, XBLOCK=512, num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_14[grid(8786432)](buf25,
primals_7, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_14[grid(8786432)](buf27,
primals_9, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.float32)
buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(2230272)](buf27,
buf28, buf29, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_16[grid(4460544)](buf31,
primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_16[grid(4460544)](buf33,
primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_16[grid(4460544)](buf35,
primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.float32)
buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(1115136)](buf35,
buf36, buf37, 1115136, XBLOCK=1024, num_warps=4, num_stages=1)
buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_18[grid(2230272)](buf39,
primals_17, 2230272, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_18[grid(2230272)](buf41,
primals_19, 2230272, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_18[grid(2230272)](buf43,
primals_21, 2230272, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.float32)
buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(591872)](buf43,
buf44, buf45, 591872, XBLOCK=512, num_warps=8, num_stages=1)
buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_20[grid(591872)](buf47,
primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_20[grid(591872)](buf49,
primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_20[grid(591872)](buf51,
primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.float32)
buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_21[grid(165888)](buf51,
buf52, buf53, 165888, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_22[grid(147456)](buf55,
primals_29, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_29
buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_22[grid(147456)](buf57,
primals_31, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 4, 3, 3), (36, 1, 12, 4))
buf59 = buf58
del buf58
triton_poi_fused_convolution_23[grid(144)](buf59, primals_33, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_33
buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 4, 8, 8), (256, 1, 32, 4))
buf61 = extern_kernels.convolution(buf44, primals_35, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 4, 17, 17), (1156, 1, 68, 4))
buf62 = buf60
del buf60
triton_poi_fused_add_24[grid(1024)](buf62, buf61, primals_36, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del buf61
del primals_36
buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 4, 18, 18), (1296, 1, 72, 4))
buf64 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 4, 33, 33), (4356, 1, 132, 4))
buf65 = buf63
del buf63
triton_poi_fused_add_25[grid(5184)](buf65, buf64, primals_39, 5184,
XBLOCK=128, num_warps=4, num_stages=1)
del buf64
del primals_39
buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 4, 152, 152), (92416, 1, 608, 4))
buf67 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_clone_26[grid(16, 4096)](buf66, buf67, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf66
return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32,
buf15, primals_35, buf16, primals_38, buf17, buf19, buf21, buf22,
buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36,
buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51,
buf52, buf53, buf55, buf57, buf59, buf62, buf65)
def conv3x3(in_planes, out_planes, stride=1, padding=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), stride=(
stride, stride), padding=(padding, padding))
def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Make a 2D bilinear kernel suitable for upsampling"""
    import numpy as np  # local import; numpy may not be pulled in by the generated header above
    factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) /
factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
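# Hedged illustration (not part of the original repository): a minimal check of
# what get_upsampling_weight produces for the kernel_size=4, stride-2 case used
# by upscore2 below. The helper name _bilinear_kernel_demo is ours, and the
# sketch assumes numpy is importable for the helper above.
def _bilinear_kernel_demo():
    w = get_upsampling_weight(1, 1, 4)  # shape (1, 1, 4, 4)
    # For an even kernel the 1D profile is (1 - |i - 1.5| / 2), i.e.
    # [0.25, 0.75, 0.75, 0.25]; the 2D kernel is its outer product.
    profile = torch.tensor([0.25, 0.75, 0.75, 0.25])
    assert torch.allclose(w[0, 0], torch.outer(profile, profile))
    return w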
class FCN8VGG16New(nn.Module):
def __init__(self, n_classes):
super().__init__()
self.n_classes = n_classes
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.relu = nn.ReLU(inplace=True)
self.conv1_1 = conv3x3(3, 64, stride=1, padding=100)
self.conv1_2 = conv3x3(64, 64)
self.conv2_1 = conv3x3(64, 128)
self.conv2_2 = conv3x3(128, 128)
self.conv3_1 = conv3x3(128, 256)
self.conv3_2 = conv3x3(256, 256)
self.conv3_3 = conv3x3(256, 256)
self.conv4_1 = conv3x3(256, 512)
self.conv4_2 = conv3x3(512, 512)
self.conv4_3 = conv3x3(512, 512)
self.conv5_1 = conv3x3(512, 512)
self.conv5_2 = conv3x3(512, 512)
self.conv5_3 = conv3x3(512, 512)
self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0)
self.dropout_f6 = nn.Dropout()
self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0)
self.dropout_f7 = nn.Dropout()
self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1,
stride=1, padding=0)
self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=4, stride=2, bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self.
n_classes, kernel_size=4, stride=2, bias=False)
self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=16, stride=8, bias=False)
self.scoring_layer.weight.data.zero_()
self.scoring_layer.bias.data.zero_()
self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1)
self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1)
self.score_pool3.weight.data.zero_()
self.score_pool3.bias.data.zero_()
self.score_pool4.weight.data.zero_()
self.score_pool4.bias.data.zero_()
self.upscore2.weight.data.copy_(get_upsampling_weight(self.
n_classes, self.n_classes, 4))
self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self.
n_classes, self.n_classes, 4))
self.upscore8.weight.data.copy_(get_upsampling_weight(self.
n_classes, self.n_classes, 16))
        from torch.utils import model_zoo  # local import; may be absent from the generated header
        pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth'
        state_dict = model_zoo.load_url(pth_url)
layer_names = [layer_name for layer_name in state_dict]
counter = 0
for p in self.parameters():
if counter < 26:
p.data = state_dict[layer_names[counter]]
elif counter == 26:
p.data = state_dict[layer_names[counter]].view(4096, 512, 7, 7)
elif counter == 27:
p.data = state_dict[layer_names[counter]]
elif counter == 28:
p.data = state_dict[layer_names[counter]].view(4096, 4096, 1, 1
)
elif counter == 29:
p.data = state_dict[layer_names[counter]]
counter += 1
def forward(self, input_0):
primals_2 = self.conv1_1.weight
primals_3 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv3_3.weight
primals_15 = self.conv3_3.bias
primals_16 = self.conv4_1.weight
primals_17 = self.conv4_1.bias
primals_18 = self.conv4_2.weight
primals_19 = self.conv4_2.bias
primals_20 = self.conv4_3.weight
primals_21 = self.conv4_3.bias
primals_22 = self.conv5_1.weight
primals_23 = self.conv5_1.bias
primals_24 = self.conv5_2.weight
primals_25 = self.conv5_2.bias
primals_26 = self.conv5_3.weight
primals_27 = self.conv5_3.bias
primals_28 = self.fc6.weight
primals_29 = self.fc6.bias
primals_30 = self.fc7.weight
primals_31 = self.fc7.bias
primals_32 = self.scoring_layer.weight
primals_33 = self.scoring_layer.bias
primals_34 = self.upscore2.weight
primals_37 = self.upscore_pool4.weight
primals_40 = self.upscore8.weight
primals_38 = self.score_pool3.weight
primals_36 = self.score_pool3.bias
primals_35 = self.score_pool4.weight
primals_39 = self.score_pool4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40])
return output[0]
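# Hedged usage sketch (not from the repository): how the compiled module is
# meant to be driven, using the 4x3x64x64 input shape and n_classes=4 that the
# call() graph above was specialized for. _fcn_demo is a hypothetical helper;
# it needs a CUDA device and a reachable (or cached) VGG16 checkpoint, since
# __init__ downloads the torchvision weights.
def _fcn_demo():
    model = FCN8VGG16New(n_classes=4).cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        scores = model(x)
    # call() crops the stride-8 upsampled map back to the input resolution.
    assert scores.shape == (4, 4, 64, 64)
    return scores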
|
IssamLaradji/looc
|
FCN8VGG16
| false
| 17,863
|
[
"Apache-2.0"
] | 9
|
50a05b9bf2d36cd8770add8cc65f9bab1ad45841
|
https://github.com/IssamLaradji/looc/tree/50a05b9bf2d36cd8770add8cc65f9bab1ad45841
|
XNOR_BinarizeConv2d
|
from torch.autograd import Function
import torch
import torch.nn as nn
import torch.nn.functional as F
class XNOR_BinaryQuantize(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
out = torch.sign(input)
return out
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone().clamp(min=-1, max=1)
return grad_input
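# Hedged sketch (not part of the original file): the straight-through estimator
# implemented above -- sign() in the forward pass, gradients clamped to [-1, 1]
# on the way back. The helper name _ste_demo is ours.
def _ste_demo():
    w = torch.tensor([-2.0, -0.3, 0.0, 0.7, 3.0], requires_grad=True)
    b = XNOR_BinaryQuantize.apply(w)  # tensor([-1., -1., 0., 1., 1.])
    b.backward(torch.tensor([5.0, 0.5, -0.2, -0.5, -5.0]))
    # Upstream gradients pass straight through, clamped to the unit interval.
    assert torch.equal(w.grad, torch.tensor([1.0, 0.5, -0.2, -0.5, -1.0]))
    return w.grad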
class XNOR_BinaryQuantize_a(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.sign(input)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input[0].ge(1)] = 0
        grad_input[input[0].le(-1)] = 0
return grad_input
class XNOR_BinarizeConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, padding_mode='zeros',
binary_func='deter'):
super(XNOR_BinarizeConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias, padding_mode)
self.binary_func = binary_func
w = self.weight
sw = w.abs().view(w.size(0), -1).mean(-1).float().view(w.size(0), 1, 1
).detach()
self.alpha = nn.Parameter(sw, requires_grad=True)
def forward(self, input):
a0 = input
w = self.weight
w1 = w - w.mean([1, 2, 3], keepdim=True)
w2 = w1 / w1.std([1, 2, 3], keepdim=True)
a1 = a0 - a0.mean([1, 2, 3], keepdim=True)
a2 = a1 / a1.std([1, 2, 3], keepdim=True)
bw = XNOR_BinaryQuantize().apply(w2)
ba = XNOR_BinaryQuantize_a().apply(a2)
output = F.conv2d(ba, bw, self.bias, self.stride, self.padding,
self.dilation, self.groups)
output = output * self.alpha
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
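# Hedged sketch (not part of the original file): one forward pass through the
# layer above. Weights and activations are standardized, binarized with sign(),
# convolved, and the result is rescaled by the per-filter alpha computed at
# construction time. _xnor_demo is a hypothetical helper name.
def _xnor_demo():
    conv = XNOR_BinarizeConv2d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    y = conv(x)
    # A 4x4 kernel over a 4x4 input with no padding leaves a 1x1 spatial map,
    # and alpha (shape [4, 1, 1]) broadcasts over it per output channel.
    assert y.shape == (4, 4, 1, 1)
    return y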
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mean_sign_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tl.where(xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = 63.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = tmp7 / tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = tmp28 < tmp27
tmp30 = tmp29.to(tl.int8)
tmp31 = tmp27 < tmp28
tmp32 = tmp31.to(tl.int8)
tmp33 = tmp30 - tmp32
tmp34 = tmp33.to(tmp27.dtype)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp26, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp34, xmask)
@triton.jit
def triton_per_fused_div_mean_sign_std_sub_1(in_ptr0, out_ptr0, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tl.where(xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = 63.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = tmp7 / tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = tmp28 < tmp27
tmp30 = tmp29.to(tl.int8)
tmp31 = tmp27 < tmp28
tmp32 = tmp31.to(tl.int8)
tmp33 = tmp30 - tmp32
tmp34 = tmp33.to(tmp27.dtype)
tl.store(out_ptr2 + (r1 + 64 * x0), tmp34, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf3
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mean_sign_std_sub_0[grid(4)](buf1, buf5,
primals_2, buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_mean_sign_std_sub_1[grid(4)](primals_1, buf6,
buf11, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf6
del primals_1
buf12 = extern_kernels.convolution(buf11, buf10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 1, 1), (4, 1, 1, 1))
buf13 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](buf12, primals_3, buf13, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf13, primals_2, primals_3, buf1, buf5, buf10, buf11, buf12
class XNOR_BinaryQuantize(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
out = torch.sign(input)
return out
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone().clamp(min=-1, max=1)
return grad_input
class XNOR_BinaryQuantize_a(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.sign(input)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input[0].ge(1)] = 0
        grad_input[input[0].le(-1)] = 0
return grad_input
class XNOR_BinarizeConv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, padding_mode='zeros',
binary_func='deter'):
super(XNOR_BinarizeConv2dNew, self).__init__(in_channels,
out_channels, kernel_size, stride, padding, dilation, groups,
bias, padding_mode)
self.binary_func = binary_func
w = self.weight
sw = w.abs().view(w.size(0), -1).mean(-1).float().view(w.size(0), 1, 1
).detach()
self.alpha = nn.Parameter(sw, requires_grad=True)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.alpha
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
RuiLin0212/BATMANN
|
XNOR_BinarizeConv2d
| false
| 17,864
|
[
"MIT"
] | 6
|
5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
https://github.com/RuiLin0212/BATMANN/tree/5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
MOTION_ReplaceBlock_B
|
import torch
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
class MOTION_ReplaceBlock_B(nn.Module):
"""
using diff
"""
def __init__(self, in_channels, n_segment, n_div):
super(MOTION_ReplaceBlock_B, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
def forward(self, x):
"""
:param x: (nt, c, h, w)
:return:(nt, c, h, w)
"""
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
out = torch.zeros_like(x)
out[:, :-1, :self.fold] = x[:, 1:, :self.fold] - x[:, :-1, :self.fold]
out[:, 1:, self.fold:2 * self.fold] = x[:, 1:, self.fold:2 * self.fold
] - x[:, :-1, self.fold:2 * self.fold]
out[:, :, 2 * self.fold:] = x[:, :, 2 * self.fold:]
out = out.view(nt, c, h, w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_segment': 4, 'n_div': 4}]
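# Hedged sketch (not part of the original file): with in_channels=4 and n_div=4
# the fold is a single channel, so channel 0 becomes a forward temporal
# difference, channel 1 a backward difference, and channels 2-3 pass through.
# _motion_demo is a hypothetical helper name.
def _motion_demo():
    block = MOTION_ReplaceBlock_B(in_channels=4, n_segment=4, n_div=4)
    x = torch.rand(4, 4, 4, 4)  # one clip of n_segment=4 frames
    out = block(x)
    assert torch.equal(out[:, 2:], x[:, 2:])          # untouched folds copied
    assert torch.equal(out[0, 0], x[1, 0] - x[0, 0])  # forward diff, frame 0
    assert torch.equal(out[3, 0], torch.zeros(4, 4))  # last frame left at zero
    return out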
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_sub_zeros_like_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x2 = xindex // 64
x0 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0)
tmp4 = x2
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp0 >= tmp5
tmp8 = tmp0 < tmp1
tmp9 = tmp7 & tmp8
tmp10 = tmp9 & tmp6
tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr0 + (-48 + x0 + 64 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 - tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = tl.full([1], 3, tl.int64)
tmp17 = tmp4 < tmp16
tmp18 = tmp17 & tmp6
tmp19 = tmp0 < tmp5
tmp20 = tmp19 & tmp18
tmp21 = tl.load(in_ptr0 + (64 + x0 + 64 * x2), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 - tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = 0.0
tmp27 = tl.where(tmp19, tmp25, tmp26)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp18, tmp27, tmp28)
tmp30 = tl.where(tmp17, tmp29, tmp26)
tmp31 = tl.where(tmp9, tmp15, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp6, tmp31, tmp32)
tmp34 = tmp19 & tmp17
tmp35 = tl.load(in_ptr0 + (64 + x0 + 64 * x2), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 - tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp34, tmp37, tmp38)
tmp40 = tl.where(tmp19, tmp39, tmp26)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp17, tmp40, tmp41)
tmp43 = tl.where(tmp17, tmp42, tmp26)
tmp44 = tl.where(tmp6, tmp33, tmp43)
tmp45 = tl.where(tmp2, tmp3, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_sub_zeros_like_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class MOTION_ReplaceBlock_BNew(nn.Module):
"""
using diff
"""
def __init__(self, in_channels, n_segment, n_div):
super(MOTION_ReplaceBlock_BNew, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
RongchangLi/DEN
|
MOTION_ReplaceBlock_B
| false
| 17,865
|
[
"MIT"
] | 4
|
f8b744f96a3a68cf0784080ffd561a5279715727
|
https://github.com/RongchangLi/DEN/tree/f8b744f96a3a68cf0784080ffd561a5279715727
|
MOTION_Channel_ReplaceBlock
|
import torch
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
class MOTION_Channel_ReplaceBlock(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(MOTION_Channel_ReplaceBlock, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.next_frame_conv = nn.Conv2d(in_channels=self.fold,
out_channels=self.fold, kernel_size=3, padding=1, stride=1,
bias=False, groups=self.fold)
self.last_frame_conv = nn.Conv2d(in_channels=self.fold,
out_channels=self.fold, kernel_size=3, padding=1, stride=1,
bias=False, groups=self.fold)
def forward(self, x):
"""
:param x: (nt, c, h, w)
:return:(nt, c, h, w)
"""
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
out = torch.zeros_like(x)
out_part = x.view(nt, c, h, w)[:, :self.fold]
out_part = self.next_frame_conv(out_part)
out_part = out_part.view(n_batch, self.n_segment, self.fold, h, w)
out[:, :-1, :self.fold] = out_part[:, 1:, :self.fold] - x[:, :-1, :
self.fold]
out_part = x.view(nt, c, h, w)[:, self.fold:2 * self.fold]
out_part = self.last_frame_conv(out_part)
out_part = out_part.view(n_batch, self.n_segment, self.fold, h, w)
out[:, 1:, self.fold:2 * self.fold] = x[:, 1:, self.fold:2 * self.fold
] - out_part[:, :-1, :self.fold]
out[:, :, 2 * self.fold:] = x[:, :, 2 * self.fold:]
out = out.view(nt, c, h, w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_segment': 4, 'n_div': 4}]
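# Hedged sketch (not part of the original file): the same replacement pattern
# as MOTION_ReplaceBlock_B, except the shifted frames are first filtered by a
# depthwise 3x3 convolution; shapes and the untouched channels are preserved.
# _motion_channel_demo is a hypothetical helper name.
def _motion_channel_demo():
    block = MOTION_Channel_ReplaceBlock(in_channels=4, n_segment=4, n_div=4)
    x = torch.rand(4, 4, 4, 4)
    with torch.no_grad():
        out = block(x)
    assert out.shape == x.shape
    assert torch.equal(out[:, 2:], x[:, 2:])  # folds beyond 2 * fold are copied verbatim
    return out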
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_sub_zeros_like_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x2 = xindex // 64
x0 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0)
tmp4 = x2
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp0 >= tmp5
tmp8 = tmp0 < tmp1
tmp9 = tmp7 & tmp8
tmp10 = tmp9 & tmp6
tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (-16 + x0 + 16 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 - tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = tl.full([1], 3, tl.int64)
tmp17 = tmp4 < tmp16
tmp18 = tmp17 & tmp6
tmp19 = tmp0 < tmp5
tmp20 = tmp19 & tmp18
tmp21 = tl.load(in_ptr2 + (16 + x0 + 16 * x2), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 - tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = 0.0
tmp27 = tl.where(tmp19, tmp25, tmp26)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp18, tmp27, tmp28)
tmp30 = tl.where(tmp17, tmp29, tmp26)
tmp31 = tl.where(tmp9, tmp15, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp6, tmp31, tmp32)
tmp34 = tmp19 & tmp17
tmp35 = tl.load(in_ptr2 + (16 + x0 + 16 * x2), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 - tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp34, tmp37, tmp38)
tmp40 = tl.where(tmp19, tmp39, tmp26)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp17, tmp40, tmp41)
tmp43 = tl.where(tmp17, tmp42, tmp26)
tmp44 = tl.where(tmp6, tmp33, tmp43)
tmp45 = tl.where(tmp2, tmp3, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 16, 4, 1), 0), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 16, 4, 1), 16), primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_sub_zeros_like_0[grid(256)](primals_1, buf1,
buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, primals_3, reinterpret_tensor(primals_1, (4, 1, 4, 4),
(64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64,
16, 4, 1), 16)
class MOTION_Channel_ReplaceBlockNew(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(MOTION_Channel_ReplaceBlockNew, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.next_frame_conv = nn.Conv2d(in_channels=self.fold,
out_channels=self.fold, kernel_size=3, padding=1, stride=1,
bias=False, groups=self.fold)
self.last_frame_conv = nn.Conv2d(in_channels=self.fold,
out_channels=self.fold, kernel_size=3, padding=1, stride=1,
bias=False, groups=self.fold)
def forward(self, input_0):
primals_2 = self.next_frame_conv.weight
primals_3 = self.last_frame_conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
RongchangLi/DEN
|
MOTION_Channel_ReplaceBlock
| false
| 17,866
|
[
"MIT"
] | 4
|
f8b744f96a3a68cf0784080ffd561a5279715727
|
https://github.com/RongchangLi/DEN/tree/f8b744f96a3a68cf0784080ffd561a5279715727
|
DiceBCELoss
|
import torch
from torch import nn
import torch.nn.functional as F
class DiceBCELoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceBCELoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
inputs = torch.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice_loss = 1 - (2.0 * intersection + smooth) / (inputs.sum() +
targets.sum() + smooth)
BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')
Dice_BCE = BCE + dice_loss
return Dice_BCE
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
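# Hedged sketch (not part of the original file): the loss sums a pixelwise BCE
# term with a soft Dice term, 1 - (2 * intersection + smooth) / (|pred| +
# |target| + smooth), so confident, correct logits should score well below
# uninformative ones. _dice_bce_demo is a hypothetical helper name.
def _dice_bce_demo():
    loss_fn = DiceBCELoss()
    targets = (torch.rand(4, 4, 4, 4) > 0.5).float()
    confident_logits = (targets * 2 - 1) * 10.0  # +10 where target is 1, -10 where 0
    uninformative_logits = torch.zeros(4, 4, 4, 4)  # sigmoid gives 0.5 everywhere
    good = loss_fn(confident_logits, targets)
    bad = loss_fn(uninformative_logits, targets)
    assert good < bad
    return good, bad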
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp4 * tmp0
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp4, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tl.broadcast_to(tmp0, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp16 / tmp27
tmp29 = 2.0
tmp30 = tmp20 * tmp29
tmp31 = tmp30 + tmp1
tmp32 = tmp23 + tmp26
tmp33 = tmp32 + tmp1
tmp34 = tmp31 / tmp33
tmp35 = tmp1 - tmp34
tmp36 = tmp28 + tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0[grid(1)](
buf4, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class DiceBCELossNew(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceBCELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
SH-96/polyp-segmentation-pytorch
|
DiceBCELoss
| false
| 17,867
|
[
"MIT"
] | 3
|
14ecd2998874a4d26c442bacc3ec69c2d42642f1
|
https://github.com/SH-96/polyp-segmentation-pytorch/tree/14ecd2998874a4d26c442bacc3ec69c2d42642f1
|
LayerNorm
|
import torch
from torch import nn
import torch.utils.data
import torch.optim
class LayerNorm(nn.Module):
def __init__(self, channels, eps=0.0001):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
n_dims = len(x.shape)
mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
x = (x - mean) * torch.rsqrt(variance + self.eps)
shape = [1, -1] + [1] * (n_dims - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
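# Minimal usage sketch (illustrative only; not from the repository). It runs the
# channel-wise LayerNorm defined above on a random (batch, channels, H, W)
# tensor matching get_inputs(); features are normalised over dim 1.
if __name__ == '__main__':
    import torch
    norm = LayerNorm(channels=4)
    x = torch.rand([4, 4, 4, 4])
    y = norm(x)
    # With gamma=1 and beta=0 the per-position mean over dim 1 is ~0.
    print(y.shape, y.mean(dim=1).abs().max().item())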
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0,
primals_2, primals_3, buf1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormNew(nn.Module):
def __init__(self, channels, eps=0.0001):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Royeqiu/Nemo_ASR
|
LayerNorm
| false
| 17,868
|
[
"Apache-2.0"
] | 10
|
12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
https://github.com/Royeqiu/Nemo_ASR/tree/12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
MultiHeadAttention
|
import math
import torch
from torch import nn
import torch.utils.data
import torch.optim
class MultiHeadAttention(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout
=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (hidden_size, num_attention_heads))
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, queries, keys, values, attention_mask):
query = self.query_net(queries)
key = self.key_net(keys)
value = self.value_net(values)
query = self.transpose_for_scores(query) / self.attn_scale
key = self.transpose_for_scores(key) / self.attn_scale
value = self.transpose_for_scores(value)
attention_scores = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
context = torch.matmul(attention_probs, value)
context = context.permute(0, 2, 1, 3).contiguous()
new_context_shape = context.size()[:-2] + (self.hidden_size,)
context = context.view(*new_context_shape)
output_states = self.out_projection(context)
output_states = self.layer_dropout(output_states)
return output_states
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_attention_heads': 4}]
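# Minimal usage sketch (illustrative only; not from the repository). It drives
# the MultiHeadAttention module above with an all-zero additive attention mask,
# using the shapes from get_inputs()/get_init_inputs().
if __name__ == '__main__':
    import torch
    attn = MultiHeadAttention(hidden_size=4, num_attention_heads=4)
    q = k = v = torch.rand([4, 4, 4])      # (batch, seq_len, hidden_size)
    mask = torch.zeros([4, 4, 4])          # additive mask; zeros leave scores unchanged
    out = attn(q, k, v, mask)
    print(out.shape)                       # torch.Size([4, 4, 4])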
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(16, 4)](buf0, primals_2, buf3, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_div_0[grid(16, 4)](buf1, primals_5, buf4, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_1[grid(64)](buf5, primals_10, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_add_2[grid(256)](buf8, primals_10, buf6,
buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_3[grid(16, 4)](buf2, primals_8, buf9, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_12, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_12
return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout
=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (hidden_size, num_attention_heads))
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.query_net.weight
primals_2 = self.query_net.bias
primals_4 = self.key_net.weight
primals_5 = self.key_net.bias
primals_7 = self.value_net.weight
primals_8 = self.value_net.bias
primals_11 = self.out_projection.weight
primals_12 = self.out_projection.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
Royeqiu/Nemo_ASR
|
MultiHeadAttention
| false
| 17,869
|
[
"Apache-2.0"
] | 10
|
12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
https://github.com/Royeqiu/Nemo_ASR/tree/12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
binary_last_fc
|
from torch.autograd import Function
import torch
import torch.nn as nn
import torch.nn.functional as F
class XNOR_BinaryQuantize(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
out = torch.sign(input)
return out
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone().clamp(min=-1, max=1)
return grad_input
class XNOR_BinaryQuantize_a(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.sign(input)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input[0].ge(1)] = 0
grad_input[input[0].le(-1)] = 0  # ctx.saved_tensors holds a single tensor, so index 0 is used for both bounds
return grad_input
class binary_last_fc(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(binary_last_fc, self).__init__(in_features, out_features, bias)
w = self.weight
sw = w.abs().mean().float().detach()
self.alpha = nn.Parameter(sw, requires_grad=True)
def forward(self, input):
a0 = input
w = self.weight
w1 = w - w.mean([1], keepdim=True)
w2 = w1 / w1.std([1], keepdim=True)
a1 = a0 - a0.mean([1], keepdim=True)
a2 = a1 / a1.std([1], keepdim=True)
bw = XNOR_BinaryQuantize().apply(w2)
ba = XNOR_BinaryQuantize_a().apply(a2)
output = F.linear(ba, bw, self.bias)
output = output * self.alpha
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
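# Minimal usage sketch (illustrative only; not from the repository). It applies
# the binarised linear layer above to a random tensor whose last dimension
# equals in_features, as in get_inputs().
if __name__ == '__main__':
    import torch
    layer = binary_last_fc(in_features=4, out_features=4)
    x = torch.rand([4, 4, 4, 4])
    y = layer(x)                           # weights/activations are sign-binarised, then scaled by alpha
    print(y.shape)                         # torch.Size([4, 4, 4, 4])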
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_sign_std_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp1 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp2 - tmp9
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = tmp4 - tmp9
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp6 - tmp9
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = 3.0
tmp22 = tmp20 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp0 / tmp23
tmp25 = tl.full([1], 0, tl.int32)
tmp26 = tmp25 < tmp24
tmp27 = tmp26.to(tl.int8)
tmp28 = tmp24 < tmp25
tmp29 = tmp28.to(tl.int8)
tmp30 = tmp27 - tmp29
tmp31 = tmp30.to(tmp24.dtype)
tl.store(out_ptr0 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused_div_sign_std_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp1 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp2 - tmp9
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = tmp4 - tmp9
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp6 - tmp9
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = 3.0
tmp22 = tmp20 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp0 / tmp23
tmp25 = tl.full([1], 0, tl.int32)
tmp26 = tmp25 < tmp24
tmp27 = tmp26.to(tl.int8)
tmp28 = tmp24 < tmp25
tmp29 = tmp28.to(tl.int8)
tmp30 = tmp27 - tmp29
tmp31 = tmp30.to(tmp24.dtype)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(16)](primals_2, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mean_sub_1[grid(256)](primals_1, buf1, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_sign_std_2[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_sign_std_3[grid(16)](buf0, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_3, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(buf3, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf4)
del buf3
del primals_3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_4[grid(256)](buf4, primals_4, buf5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_2, primals_4, reinterpret_tensor(buf2, (64, 4), (4,
1), 0), buf4
class XNOR_BinaryQuantize(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
out = torch.sign(input)
return out
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone().clamp(min=-1, max=1)
return grad_input
class XNOR_BinaryQuantize_a(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.sign(input)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input[0].ge(1)] = 0
grad_input[input[0].le(-1)] = 0  # ctx.saved_tensors holds a single tensor, so index 0 is used for both bounds
return grad_input
class binary_last_fcNew(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(binary_last_fcNew, self).__init__(in_features, out_features, bias
)
w = self.weight
sw = w.abs().mean().float().detach()
self.alpha = nn.Parameter(sw, requires_grad=True)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_4 = self.alpha
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
RuiLin0212/BATMANN
|
binary_last_fc
| false
| 17,870
|
[
"MIT"
] | 6
|
5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
https://github.com/RuiLin0212/BATMANN/tree/5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
|
ValueNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ValueNetwork(nn.Module):
def __init__(self, input_dim, output_dim, init_w=0.003):
super(ValueNetwork, self).__init__()
self.fc1 = nn.Linear(input_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, output_dim)
self.fc3.weight.data.uniform_(-init_w, init_w)
self.fc3.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
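# Minimal usage sketch (illustrative only; not from the repository). nn.Linear
# acts on the last dimension, so any leading shape works as long as the final
# dimension equals input_dim, as in get_inputs().
if __name__ == '__main__':
    import torch
    net = ValueNetwork(input_dim=4, output_dim=4)
    state = torch.rand([4, 4, 4, 4])
    value = net(state)
    print(value.shape)                     # torch.Size([4, 4, 4, 4])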
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf5, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), primals_6, buf5, primals_4, buf6
class ValueNetworkNew(nn.Module):
def __init__(self, input_dim, output_dim, init_w=0.003):
super(ValueNetworkNew, self).__init__()
self.fc1 = nn.Linear(input_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, output_dim)
self.fc3.weight.data.uniform_(-init_w, init_w)
self.fc3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
SAMMiCA/DL_based_E2E_Driving
|
ValueNetwork
| false
| 17,871
|
[
"MIT"
] | 4
|
01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
RMSELoss
|
import torch
import torch.nn as nn
class RMSELoss(torch.nn.Module):
def __init__(self):
super(RMSELoss, self).__init__()
def forward(self, x, y):
criterion = nn.MSELoss()
loss = torch.sqrt(criterion(x, y))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
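# Minimal usage sketch (illustrative only; not from the repository). It checks
# the module above against the closed form rmse(x, y) = sqrt(mean((x - y)**2)).
if __name__ == '__main__':
    import torch
    criterion = RMSELoss()
    x, y = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])
    loss = criterion(x, y)
    reference = torch.sqrt(torch.mean((x - y) ** 2))
    print(loss.item(), torch.allclose(loss, reference))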
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RMSELossNew(torch.nn.Module):
def __init__(self):
super(RMSELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
SAMMiCA/DL_based_E2E_Driving
|
RMSELoss
| false
| 17,872
|
[
"MIT"
] | 4
|
01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
InvConvNear
|
import torch
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
class InvConvNear(nn.Module):
def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
super().__init__()
assert n_split % 2 == 0
self.channels = channels
self.n_split = n_split
self.no_jacobian = no_jacobian
w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).
normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.weight = nn.Parameter(w_init)
def forward(self, x, x_mask=None, reverse=False, **kwargs):
b, c, t = x.size()
assert c % self.n_split == 0
if x_mask is None:
x_mask = 1
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
x = x.view(b, 2, c // self.n_split, self.n_split // 2, t)
x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c //
self.n_split, t)
if reverse:
if hasattr(self, 'weight_inv'):
weight = self.weight_inv
else:
weight = torch.inverse(self.weight.float())
logdet = None
else:
weight = self.weight
if self.no_jacobian:
logdet = 0
else:
logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len
weight = weight.view(self.n_split, self.n_split, 1, 1)
z = F.conv2d(x, weight)
z = z.view(b, 2, self.n_split // 2, c // self.n_split, t)
z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
return z, logdet
def store_inverse(self):
self.weight_inv = torch.inverse(self.weight.float())
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
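# Minimal usage sketch (illustrative only; not from the repository). It runs the
# invertible near-channel convolution above forwards and then in reverse, which
# should reconstruct the input up to floating-point error.
if __name__ == '__main__':
    import torch
    flow = InvConvNear(channels=4)
    x = torch.rand([4, 4, 4])              # (batch, channels, time)
    z, logdet = flow(x)                    # logdet has shape (batch,)
    x_rec, _ = flow(z, reverse=True)
    print(logdet.shape, torch.allclose(x, x_rec, atol=1e-4))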
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = -1.0
tmp3 = tmp1 == tmp2
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused_mul_scalar_tensor_where_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0).to(tl.int1)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = float('nan')
tmp5 = tl.where(tmp1, tmp4, tmp3)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = 4.0
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (1, 4))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._linalg_slogdet.default(primals_2)
buf1 = buf0[0]
buf2 = buf0[1]
buf3 = buf0[2]
buf4 = buf0[3]
del buf0
buf5 = empty_strided_cuda((), (), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_0[grid(1)](buf1, buf5, 1, XBLOCK=1, num_warps=1,
num_stages=1)
del buf1
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_scalar_tensor_where_1[grid(4)](buf5, buf2,
buf6, 4, XBLOCK=4, num_warps=1, num_stages=1)
del buf2
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_2[grid(4, 4)](primals_2, buf7, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf8 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
4, 1, 4), (16, 4, 4, 1), 0), buf7, stride=(1, 1), padding=(0, 0
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1, 4), (16, 4, 4, 1))
del buf7
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0)
del buf8
triton_poi_fused_mul_3[grid(64)](buf9, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf9, buf6, reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4,
8, 1), 0), buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 1,
1), (1, 4, 4, 4), 0)
class InvConvNearNew(nn.Module):
def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
super().__init__()
assert n_split % 2 == 0
self.channels = channels
self.n_split = n_split
self.no_jacobian = no_jacobian
w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).
normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.weight = nn.Parameter(w_init)
def store_inverse(self):
self.weight_inv = torch.inverse(self.weight.float())
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
Royeqiu/Nemo_ASR
|
InvConvNear
| false
| 17,873
|
[
"Apache-2.0"
] | 10
|
12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
https://github.com/Royeqiu/Nemo_ASR/tree/12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
ConvGLU
|
import torch
from torch import nn
import torch.utils.data
import torch.optim
def str2act(txt):
"""Translates text to neural network activation"""
return {'sigmoid': nn.Sigmoid(), 'relu': nn.ReLU(), 'none': nn.
Sequential(), 'lrelu': nn.LeakyReLU(0.2), 'selu': nn.SELU()}[txt.
lower()]
class ConvGLU(nn.Module):
"""
A ConvGLU operation, used by the Degli paper's model.
"""
def __init__(self, in_ch, out_ch, kernel_size=(7, 7), padding=None,
batchnorm=False, act='sigmoid', stride=None):
super().__init__()
if not padding:
padding = kernel_size[0] // 2, kernel_size[1] // 2
if stride is None:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding)
else:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding, stride=stride)
self.weight = self.conv.weight
self.bias = self.conv.bias
if batchnorm:
self.conv = nn.Sequential(self.conv, nn.BatchNorm2d(out_ch * 2))
self.sigmoid = str2act(act)
def forward(self, x):
x = self.conv(x)
ch = x.shape[1]
x = x[:, :ch // 2, ...] * self.sigmoid(x[:, ch // 2:, ...])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
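# Minimal usage sketch (illustrative only; not from the repository). The layer
# above produces 2 * out_ch feature maps and gates the first half with the
# activation of the second, so the output keeps out_ch channels.
if __name__ == '__main__':
    import torch
    glu = ConvGLU(in_ch=4, out_ch=4)
    x = torch.rand([4, 4, 4, 4])           # (batch, in_ch, H, W)
    y = glu(x)
    print(y.shape)                         # torch.Size([4, 4, 4, 4])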
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def str2act(txt):
"""Translates text to neural network activation"""
return {'sigmoid': nn.Sigmoid(), 'relu': nn.ReLU(), 'none': nn.
Sequential(), 'lrelu': nn.LeakyReLU(0.2), 'selu': nn.SELU()}[txt.
lower()]
class ConvGLUNew(nn.Module):
"""
A ConvGLU operation, used by the Degli paper's model.
"""
def __init__(self, in_ch, out_ch, kernel_size=(7, 7), padding=None,
batchnorm=False, act='sigmoid', stride=None):
super().__init__()
if not padding:
padding = kernel_size[0] // 2, kernel_size[1] // 2
if stride is None:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding)
else:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding, stride=stride)
self.weight = self.conv.weight
self.bias = self.conv.bias
if batchnorm:
self.conv = nn.Sequential(self.conv, nn.BatchNorm2d(out_ch * 2))
self.sigmoid = str2act(act)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Royeqiu/Nemo_ASR
|
ConvGLU
| false
| 17,874
|
[
"Apache-2.0"
] | 10
|
12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
https://github.com/Royeqiu/Nemo_ASR/tree/12b91b06dc5e4d0aa29d43bc7e701a93ee5eec4e
|
MOTION_ReplaceBlock_D
|
import torch
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
class MOTION_ReplaceBlock_D(nn.Module):
"""
Reuses a single shared frame convolution to build forward and backward temporal-difference features for the first two channel folds; the remaining channels are passed through unchanged.
"""
def __init__(self, in_channels, n_segment, n_div):
super(MOTION_ReplaceBlock_D, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.frame_conv = nn.Conv2d(in_channels=self.fold, out_channels=
self.fold, kernel_size=3, padding=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
nn.init.constant_(self.frame_conv.weight, 0)
nn.init.constant_(self.frame_conv.bias, 0)
def forward(self, x):
"""
:param x: (nt, c, h, w)
:return: (nt, c, h, w)
"""
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
out = torch.zeros_like(x)
out_part = x.view(nt, c, h, w)[:, :self.fold]
out_part = self.frame_conv(out_part)
out_part = out_part.view(n_batch, self.n_segment, self.fold, h, w)
out[:, :-1, :self.fold] = out_part[:, 1:, :self.fold] - x[:, :-1, :
self.fold]
out_part = x.view(nt, c, h, w)[:, self.fold:2 * self.fold]
out_part = self.frame_conv(out_part)
out_part = out_part.view(n_batch, self.n_segment, self.fold, h, w)
out[:, 1:, self.fold:2 * self.fold] = x[:, 1:, self.fold:2 * self.fold
] - out_part[:, :-1, :self.fold]
out[:, :, 2 * self.fold:] = x[:, :, 2 * self.fold:]
out = out.view(nt, c, h, w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_segment': 4, 'n_div': 4}]
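# Minimal usage sketch (illustrative only; not from the repository). The block
# above expects frames of each clip stacked along dim 0, i.e. a batch of
# n_batch * n_segment frame tensors, as in get_inputs().
if __name__ == '__main__':
    import torch
    block = MOTION_ReplaceBlock_D(in_channels=4, n_segment=4, n_div=4)
    x = torch.rand([4, 4, 4, 4])           # (n_batch * n_segment, c, h, w)
    y = block(x)
    print(y.shape)                         # torch.Size([4, 4, 4, 4])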
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_sub_zeros_like_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x2 = xindex // 64
x0 = xindex % 16
tmp13 = tl.load(in_ptr2 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0)
tmp4 = x2
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp0 >= tmp5
tmp8 = tmp0 < tmp1
tmp9 = tmp7 & tmp8
tmp10 = tmp9 & tmp6
tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (-16 + x0 + 16 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tmp12 + tmp14
tmp16 = tmp11 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tmp19 = tl.full([1], 3, tl.int64)
tmp20 = tmp4 < tmp19
tmp21 = tmp20 & tmp6
tmp22 = tmp0 < tmp5
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr3 + (16 + x0 + 16 * x2), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp24 + tmp14
tmp26 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 - tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp23, tmp27, tmp28)
tmp30 = 0.0
tmp31 = tl.where(tmp22, tmp29, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp21, tmp31, tmp32)
tmp34 = tl.where(tmp20, tmp33, tmp30)
tmp35 = tl.where(tmp9, tmp18, tmp34)
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp6, tmp35, tmp36)
tmp38 = tmp22 & tmp20
tmp39 = tl.load(in_ptr3 + (16 + x0 + 16 * x2), tmp38 & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tmp39 + tmp14
tmp41 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp38 & xmask,
eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 - tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp38, tmp42, tmp43)
tmp45 = tl.where(tmp22, tmp44, tmp30)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp20, tmp45, tmp46)
tmp48 = tl.where(tmp20, tmp47, tmp30)
tmp49 = tl.where(tmp6, tmp37, tmp48)
tmp50 = tl.where(tmp2, tmp3, tmp49)
tl.store(out_ptr0 + x3, tmp50, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 16, 4, 1), 0), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 16, 4, 1), 16), primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_sub_zeros_like_0[grid(256)](primals_1, buf1,
primals_3, buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16,
4, 1), 0), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4,
1), 16)
class MOTION_ReplaceBlock_DNew(nn.Module):
"""
Reuses a single shared frame convolution to build forward and backward temporal-difference features for the first two channel folds; the remaining channels are passed through unchanged.
"""
def __init__(self, in_channels, n_segment, n_div):
super(MOTION_ReplaceBlock_DNew, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.frame_conv = nn.Conv2d(in_channels=self.fold, out_channels=
self.fold, kernel_size=3, padding=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
nn.init.constant_(self.frame_conv.weight, 0)
nn.init.constant_(self.frame_conv.bias, 0)
def forward(self, input_0):
primals_2 = self.frame_conv.weight
primals_3 = self.frame_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
RongchangLi/DEN
|
MOTION_ReplaceBlock_D
| false
| 17,875
|
[
"MIT"
] | 4
|
f8b744f96a3a68cf0784080ffd561a5279715727
|
https://github.com/RongchangLi/DEN/tree/f8b744f96a3a68cf0784080ffd561a5279715727
|
TransformerEncoderLayer
|
import math
import torch
from typing import Callable
from typing import Optional
from typing import Tuple
from typing import List
from typing import Dict
from typing import Union
from typing import Any
import torch.utils.data
import torch.nn.functional as F
import torch.nn
import torch.cuda
import torch.backends.cudnn
import torch.optim
import torch.cuda.amp
class LayerWithVisualization(torch.nn.Module):
def __init__(self):
super().__init__()
self.visualization_enabled = False
def prepare(self):
pass
def plot(self, options: 'Dict[str, Any]') ->Dict[str, Any]:
raise NotImplementedError()
class AttentionMergeMixin:
def __init__(self, out_size: 'Optional[int]') ->None:
self.multi_head_merge = torch.nn.Linear(self.n_heads * self.
projection_size, out_size or self.state_size, bias=False)
def merged_attention(self, n_batch: 'int', n_out_steps: 'int', *args,
need_weights: bool=False, **kwargs) ->Union[torch.Tensor, Tuple[
torch.Tensor, torch.Tensor]]:
data, scores = self._attention(*args, **kwargs)
data = data.view(n_batch, self.n_heads, n_out_steps, -1).permute(0,
2, 1, 3).contiguous().view(n_batch, n_out_steps, -1)
return self.multi_head_merge(data), scores
class MultiHeadAttentionBase(LayerWithVisualization):
def __init__(self, state_size: 'int', n_heads: 'int', dropout: 'float'=
0.1, projection_size: 'Optional[int]'=None):
assert state_size % n_heads == 0
super().__init__()
self.attention_to_visualize = []
self.state_size = state_size
self.projection_size = projection_size or state_size // n_heads
self.n_heads = n_heads
self.scale = 1.0 / math.sqrt(self.projection_size)
self.dropout = torch.nn.Dropout(dropout)
@staticmethod
def apply_logit_masks(logits: 'torch.Tensor', mask:
'Optional[AttentionMask]', val: 'float'=float('-inf')) ->torch.Tensor:
if mask.position_mask is not None:
logits = logits.masked_fill(mask.position_mask, val)
if mask.src_length_mask is not None:
b, i = mask.src_length_mask.shape
pad_dims = logits.ndim - 2
logits = logits.masked_fill(mask.src_length_mask.view([b] + [1] *
pad_dims + [i]), val)
return logits
def _masked_softmax(self, logits: 'torch.Tensor', mask:
'Optional[AttentionMask]') ->torch.Tensor:
if (mask is None or mask.src_length_mask is None and mask.
position_mask is None):
return F.softmax(logits, -1)
bb, n_time_dest, n_time_src = logits.shape
logits = logits.view(bb // self.n_heads, self.n_heads, n_time_dest,
n_time_src)
logits = self.apply_logit_masks(logits, mask)
logits = F.softmax(logits, -1)
return logits.view(bb, n_time_dest, n_time_src)
def _attention_read(self, mask: 'Optional[AttentionMask]', scores:
'torch.Tensor', v: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
s_reshape = scores.view(-1, self.n_heads, *scores.shape[1:])
if self.visualization_enabled:
self.attention_to_visualize.append(s_reshape[0])
return torch.bmm(scores, v), s_reshape
def transform_data(self, input: 'torch.Tensor', proj:
'Callable[[torch.Tensor], torch.Tensor]', n_projs: 'int') ->List[torch
.Tensor]:
n_batch, n_steps, _ = input.shape
transformed = proj(input).view(n_batch, n_steps, self.n_heads,
n_projs, -1).permute(0, 2, 1, 3, 4).contiguous().view(n_batch *
self.n_heads, n_steps, n_projs, -1)
return transformed.unbind(dim=2)
def plot(self, options: 'Dict[str, Any]') ->Dict[str, Any]:
r = {}
marks = options.get('steplabel')
if options.get('mha.plot_head_details'
) and self.attention_to_visualize[0].shape[0] > 1:
for head in range(self.attention_to_visualize[0].shape[0]):
r[f'head_{head}'] = framework.visualize.plot.AnimatedHeatmap(
torch.stack([layer[head] for _, layer in enumerate(self
.attention_to_visualize)], 0), ylabel='dest', xlabel=
'src', textval=False, x_marks=marks, y_marks=marks,
ignore_wrong_marks=True)
r['attention_max'] = framework.visualize.plot.AnimatedHeatmap(torch
.stack([layer.max(0)[0] for _, layer in enumerate(self.
attention_to_visualize)], 0), ylabel='dest', xlabel='src',
textval=False, x_marks=marks, y_marks=marks, ignore_wrong_marks
=True)
self.attention_to_visualize = []
return r
class AbsPosAttentionBase(MultiHeadAttentionBase):
def get_attention_scores(self, mask: 'Optional[torch.Tensor]', q:
'torch.Tensor', k: 'torch.Tensor') ->torch.Tensor:
logits = torch.bmm(q, k.transpose(1, 2))
return self._masked_softmax(logits * self.scale, mask)
def _attention(self, mask: 'Optional[torch.Tensor]', q: 'torch.Tensor',
k: 'torch.Tensor', v: 'torch.Tensor') ->torch.Tensor:
scores = self.get_attention_scores(mask, q, k)
return self._attention_read(mask, scores, v)
class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):
def __init__(self, state_size: 'int', n_heads: 'int', dropout: 'float'=
0.1, input_size: 'Optional[int]'=None, out_size: 'Optional[int]'=None):
super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout)
self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.
projection_size, bias=False)
self.data_to_q = torch.nn.Linear(input_size or state_size, n_heads *
self.projection_size, bias=False)
super(MultiHeadAttention, self).__init__(out_size)
self.reset_parameters()
def forward(self, curr_state: 'torch.Tensor', attend_to: 'torch.Tensor',
mask: 'Optional[AttentionMask]', need_weights: 'bool'=False):
k, v = self.transform_data(attend_to, self.data_to_kv, 2)
q, = self.transform_data(curr_state, self.data_to_q, 1)
data, scores = self.merged_attention(curr_state.shape[0], q.shape[1
], mask, q, k, v)
if need_weights:
return data, scores
else:
return data
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.data_to_q.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
class TransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation: 'ActivationFunction'=F.relu, attention_dropout=0):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=
attention_dropout)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, src: 'torch.Tensor', mask: 'Optional[AttentionMask]'=None
) ->torch.Tensor:
src2 = self.self_attn(src, src, mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.
init.calculate_gain('relu') if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
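# Usage sketch (illustrative addition, not from the captured repository): a CPU
# smoke test wiring get_init_inputs()/get_inputs() into the reference layer; the
# attention mask is optional, so it is simply omitted here.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    layer = TransformerEncoderLayer(*init_args, **init_kwargs)
    src, = get_inputs()
    out = layer(src)
    print(out.shape)  # torch.Size([4, 4, 4])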
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from typing import Callable
from typing import Optional
from typing import Tuple
from typing import List
from typing import Dict
from typing import Union
from typing import Any
import torch.utils.data
import torch.nn.functional as F
import torch.nn
import torch.cuda
import torch.backends.cudnn
import torch.optim
import torch.cuda.amp
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8 % 4
x3 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 2 * x2 + 8 * x1 + 32 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (2048, 4), (4, 1))
assert_size_stride(primals_8, (2048,), (1,))
assert_size_stride(primals_9, (4, 2048), (2048, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf1, buf2, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 2, 1), (32, 8, 2, 1, 1), torch.
float32)
triton_poi_fused_clone_1[grid(128)](buf0, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf4 = reinterpret_tensor(buf1, (16, 1, 4), (4, 64, 1), 0)
del buf1
triton_poi_fused_bmm_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 0),
0), buf4, out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 64), 0)
del buf4
triton_poi_fused_bmm_5[grid(64)](buf3, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, buf8, out=buf9)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf11,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf11,
buf12, buf13, primals_5, primals_6, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_6
buf15 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 2048), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 2048), (8192, 2048, 1), 0)
del buf15
buf22 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_8[grid(32768)](buf16,
primals_8, buf22, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_9, (2048, 4), (1, 2048), 0), out
=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
del buf17
triton_poi_fused_add_9[grid(64)](buf18, buf14, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
buf19 = buf13
del buf13
buf20 = buf12
del buf12
triton_poi_fused_native_layer_norm_10[grid(16)](buf18, buf19, buf20,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_11[grid(64)](buf18, buf19, buf20,
primals_11, primals_12, buf21, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf19
del buf20
del primals_12
return buf21, primals_1, primals_5, primals_11, buf7, reinterpret_tensor(
buf10, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4
), (4, 1), 0), reinterpret_tensor(buf16, (16, 2048), (2048, 1), 0
), buf18, primals_9, buf22, primals_7, primals_4, reinterpret_tensor(
buf3, (16, 1, 4), (8, 1, 2), 1), reinterpret_tensor(buf2, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (8, 2, 1), 0)
class LayerWithVisualization(torch.nn.Module):
def __init__(self):
super().__init__()
self.visualization_enabled = False
def prepare(self):
pass
def plot(self, options: 'Dict[str, Any]') ->Dict[str, Any]:
raise NotImplementedError()
class AttentionMergeMixin:
def __init__(self, out_size: 'Optional[int]') ->None:
self.multi_head_merge = torch.nn.Linear(self.n_heads * self.
projection_size, out_size or self.state_size, bias=False)
def merged_attention(self, n_batch: 'int', n_out_steps: 'int', *args,
need_weights: bool=False, **kwargs) ->Union[torch.Tensor, Tuple[
torch.Tensor, torch.Tensor]]:
data, scores = self._attention(*args, **kwargs)
data = data.view(n_batch, self.n_heads, n_out_steps, -1).permute(0,
2, 1, 3).contiguous().view(n_batch, n_out_steps, -1)
return self.multi_head_merge(data), scores
class MultiHeadAttentionBase(LayerWithVisualization):
def __init__(self, state_size: 'int', n_heads: 'int', dropout: 'float'=
0.1, projection_size: 'Optional[int]'=None):
assert state_size % n_heads == 0
super().__init__()
self.attention_to_visualize = []
self.state_size = state_size
self.projection_size = projection_size or state_size // n_heads
self.n_heads = n_heads
self.scale = 1.0 / math.sqrt(self.projection_size)
self.dropout = torch.nn.Dropout(dropout)
@staticmethod
def apply_logit_masks(logits: 'torch.Tensor', mask:
'Optional[AttentionMask]', val: 'float'=float('-inf')) ->torch.Tensor:
if mask.position_mask is not None:
logits = logits.masked_fill(mask.position_mask, val)
if mask.src_length_mask is not None:
b, i = mask.src_length_mask.shape
pad_dims = logits.ndim - 2
logits = logits.masked_fill(mask.src_length_mask.view([b] + [1] *
pad_dims + [i]), val)
return logits
def _masked_softmax(self, logits: 'torch.Tensor', mask:
'Optional[AttentionMask]') ->torch.Tensor:
if (mask is None or mask.src_length_mask is None and mask.
position_mask is None):
return F.softmax(logits, -1)
bb, n_time_dest, n_time_src = logits.shape
logits = logits.view(bb // self.n_heads, self.n_heads, n_time_dest,
n_time_src)
logits = self.apply_logit_masks(logits, mask)
logits = F.softmax(logits, -1)
return logits.view(bb, n_time_dest, n_time_src)
def _attention_read(self, mask: 'Optional[AttentionMask]', scores:
'torch.Tensor', v: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
s_reshape = scores.view(-1, self.n_heads, *scores.shape[1:])
if self.visualization_enabled:
self.attention_to_visualize.append(s_reshape[0])
return torch.bmm(scores, v), s_reshape
def transform_data(self, input: 'torch.Tensor', proj:
'Callable[[torch.Tensor], torch.Tensor]', n_projs: 'int') ->List[torch
.Tensor]:
n_batch, n_steps, _ = input.shape
transformed = proj(input).view(n_batch, n_steps, self.n_heads,
n_projs, -1).permute(0, 2, 1, 3, 4).contiguous().view(n_batch *
self.n_heads, n_steps, n_projs, -1)
return transformed.unbind(dim=2)
def plot(self, options: 'Dict[str, Any]') ->Dict[str, Any]:
r = {}
marks = options.get('steplabel')
if options.get('mha.plot_head_details'
) and self.attention_to_visualize[0].shape[0] > 1:
for head in range(self.attention_to_visualize[0].shape[0]):
r[f'head_{head}'] = framework.visualize.plot.AnimatedHeatmap(
torch.stack([layer[head] for _, layer in enumerate(self
.attention_to_visualize)], 0), ylabel='dest', xlabel=
'src', textval=False, x_marks=marks, y_marks=marks,
ignore_wrong_marks=True)
r['attention_max'] = framework.visualize.plot.AnimatedHeatmap(torch
.stack([layer.max(0)[0] for _, layer in enumerate(self.
attention_to_visualize)], 0), ylabel='dest', xlabel='src',
textval=False, x_marks=marks, y_marks=marks, ignore_wrong_marks
=True)
self.attention_to_visualize = []
return r
class AbsPosAttentionBase(MultiHeadAttentionBase):
def get_attention_scores(self, mask: 'Optional[torch.Tensor]', q:
'torch.Tensor', k: 'torch.Tensor') ->torch.Tensor:
logits = torch.bmm(q, k.transpose(1, 2))
return self._masked_softmax(logits * self.scale, mask)
def _attention(self, mask: 'Optional[torch.Tensor]', q: 'torch.Tensor',
k: 'torch.Tensor', v: 'torch.Tensor') ->torch.Tensor:
scores = self.get_attention_scores(mask, q, k)
return self._attention_read(mask, scores, v)
class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):
def __init__(self, state_size: 'int', n_heads: 'int', dropout: 'float'=
0.1, input_size: 'Optional[int]'=None, out_size: 'Optional[int]'=None):
super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout)
self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.
projection_size, bias=False)
self.data_to_q = torch.nn.Linear(input_size or state_size, n_heads *
self.projection_size, bias=False)
super(MultiHeadAttention, self).__init__(out_size)
self.reset_parameters()
def forward(self, curr_state: 'torch.Tensor', attend_to: 'torch.Tensor',
mask: 'Optional[AttentionMask]', need_weights: 'bool'=False):
k, v = self.transform_data(attend_to, self.data_to_kv, 2)
q, = self.transform_data(curr_state, self.data_to_q, 1)
data, scores = self.merged_attention(curr_state.shape[0], q.shape[1
], mask, q, k, v)
if need_weights:
return data, scores
else:
return data
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.data_to_q.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
class TransformerEncoderLayerNew(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation: 'ActivationFunction'=F.relu, attention_dropout=0):
super(TransformerEncoderLayerNew, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=
attention_dropout)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.
init.calculate_gain('relu') if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
def forward(self, input_0):
primals_2 = self.self_attn.data_to_kv.weight
primals_3 = self.self_attn.data_to_q.weight
primals_4 = self.self_attn.multi_head_merge.weight
primals_7 = self.linear1.weight
primals_8 = self.linear1.bias
primals_9 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.norm1.weight
primals_10 = self.norm1.bias
primals_11 = self.norm2.weight
primals_12 = self.norm2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
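# Usage sketch (illustrative addition, not from the captured repository): the
# Triton-backed class keeps the reference constructor and takes a single input
# tensor; it assumes a CUDA device, since call() allocates CUDA buffers and
# launches Triton kernels for exactly the shapes asserted above.
if __name__ == '__main__' and torch.cuda.is_available():
    layer = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda()
    src = torch.rand(4, 4, 4, device='cuda')
    out = layer(src)
    print(out.shape)  # torch.Size([4, 4, 4])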
|
RobertCsordas/tcf
|
TransformerEncoderLayer
| false
| 17,876
|
[
"MIT"
] | 5
|
da20530dfb4336deddfbe5e79d62e72d1dc2580e
|
https://github.com/RobertCsordas/tcf/tree/da20530dfb4336deddfbe5e79d62e72d1dc2580e
|
EncoderLayer
|
import torch
import torch as th
import torch.nn as nn
class FeedForwardNetwork(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate):
super(FeedForwardNetwork, self).__init__()
self.layer1 = nn.Linear(hidden_size, ffn_size)
self.gelu = nn.GELU()
self.layer2 = nn.Linear(ffn_size, hidden_size)
def forward(self, x):
x = self.layer1(x)
x = self.gelu(x)
x = self.layer2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = th.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = th.softmax(x, dim=3)
attn_mat = x
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x, attn_mat
class EncoderLayer(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate,
attention_dropout_rate, num_heads):
super(EncoderLayer, self).__init__()
self.self_attention_norm = nn.LayerNorm(hidden_size)
self.self_attention = MultiHeadAttention(hidden_size,
attention_dropout_rate, num_heads)
self.self_attention_dropout = nn.Dropout(dropout_rate)
self.ffn_norm = nn.LayerNorm(hidden_size)
self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
self.ffn_dropout = nn.Dropout(dropout_rate)
def forward(self, x, attn_bias=None):
y = self.self_attention_norm(x)
y, attn_mat = self.self_attention(y, y, y, attn_bias)
y = self.self_attention_dropout(y)
x = x + y
y = self.ffn_norm(x)
y = self.ffn(y)
y = self.ffn_dropout(y)
x = x + y
return x, attn_mat
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'ffn_size': 4, 'dropout_rate': 0.5,
'attention_dropout_rate': 0.5, 'num_heads': 4}]
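# Usage sketch (illustrative addition, not from the captured repository): the
# reference encoder layer returns both the block output and the attention matrix,
# so a CPU smoke test with the toy shapes above looks like this.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    model = EncoderLayer(*init_args, **init_kwargs)
    x, = get_inputs()
    out, attn_mat = model(x)
    print(out.shape, attn_mat.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4, 4])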
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch as th
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_mul_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
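# Note (added for clarity): the kernel above is the exact erf-based GELU used by
# nn.GELU() in FeedForwardNetwork below; 0.7071067811865476 ~ 1/sqrt(2), so
# tmp8 = 0.5 * x * (1 + erf(x / sqrt(2))).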
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, primals_5, buf6, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_7, buf7, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(16, 4)](buf5, primals_9, buf11, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_11
buf15 = buf1
del buf1
buf16 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf14,
buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf14,
buf15, buf16, primals_12, primals_13, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf15
del buf16
del primals_13
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf18)
del primals_15
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_9[grid(64)](buf18, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0)
del buf20
triton_poi_fused_add_10[grid(64)](buf21, primals_3, buf14,
primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
return buf21, buf10, primals_3, primals_12, reinterpret_tensor(buf2, (
16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0
), buf14, reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0
), primals_16, primals_14, primals_10, reinterpret_tensor(buf11, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
class FeedForwardNetwork(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate):
super(FeedForwardNetwork, self).__init__()
self.layer1 = nn.Linear(hidden_size, ffn_size)
self.gelu = nn.GELU()
self.layer2 = nn.Linear(ffn_size, hidden_size)
def forward(self, x):
x = self.layer1(x)
x = self.gelu(x)
x = self.layer2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = th.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = th.softmax(x, dim=3)
attn_mat = x
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x, attn_mat
class EncoderLayerNew(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate,
attention_dropout_rate, num_heads):
super(EncoderLayerNew, self).__init__()
self.self_attention_norm = nn.LayerNorm(hidden_size)
self.self_attention = MultiHeadAttention(hidden_size,
attention_dropout_rate, num_heads)
self.self_attention_dropout = nn.Dropout(dropout_rate)
self.ffn_norm = nn.LayerNorm(hidden_size)
self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
self.ffn_dropout = nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_1 = self.self_attention_norm.weight
primals_2 = self.self_attention_norm.bias
primals_4 = self.self_attention.linear_q.weight
primals_5 = self.self_attention.linear_q.bias
primals_6 = self.self_attention.linear_k.weight
primals_7 = self.self_attention.linear_k.bias
primals_8 = self.self_attention.linear_v.weight
primals_9 = self.self_attention.linear_v.bias
primals_10 = self.self_attention.output_layer.weight
primals_11 = self.self_attention.output_layer.bias
primals_12 = self.ffn_norm.weight
primals_13 = self.ffn_norm.bias
primals_14 = self.ffn.layer1.weight
primals_15 = self.ffn.layer1.bias
primals_16 = self.ffn.layer2.weight
primals_17 = self.ffn.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0], output[1]
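# Usage sketch (illustrative addition, not from the captured repository): the
# Triton-backed layer mirrors the reference signature and returns (output, attention);
# it assumes a CUDA device, since call() allocates CUDA buffers and launches kernels.
if __name__ == '__main__' and torch.cuda.is_available():
    model = EncoderLayerNew(hidden_size=4, ffn_size=4, dropout_rate=0.5,
        attention_dropout_rate=0.5, num_heads=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    out, attn_mat = model(x)
    print(out.shape, attn_mat.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4, 4])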
|
Roestlab/massformer
|
EncoderLayer
| false
| 17,877
|
[
"BSD-2-Clause"
] | 6
|
c6324970c392f8ee96651679f49d21e430caa0c9
|
https://github.com/Roestlab/massformer/tree/c6324970c392f8ee96651679f49d21e430caa0c9
|
SoftQNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftQNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=0.003):
super(SoftQNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4}]
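# Usage sketch (illustrative addition, not from the captured repository): the critic
# concatenates a state batch and an action batch and emits one Q-value per row.
if __name__ == '__main__':
    q_net = SoftQNetwork(num_inputs=4, num_actions=4)
    state, action = get_inputs()
    q_value = q_net(state, action)
    print(q_value.shape)  # torch.Size([4, 1])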
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (256, 8), (8, 1))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (256, 256), (256, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (
1, 256), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(1024)](buf4, primals_6, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class SoftQNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=0.003):
super(SoftQNetworkNew, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_5 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
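# Usage sketch (illustrative addition, not from the captured repository): the
# Triton-backed critic keeps the same constructor and forward signature, so it can
# stand in for SoftQNetwork; it assumes a CUDA device, since call() requires CUDA
# tensors of the asserted shapes.
if __name__ == '__main__' and torch.cuda.is_available():
    q_net = SoftQNetworkNew(num_inputs=4, num_actions=4).cuda()
    state = torch.rand(4, 4, device='cuda')
    action = torch.rand(4, 4, device='cuda')
    print(q_net(state, action).shape)  # torch.Size([4, 1])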
|
SAMMiCA/DL_based_E2E_Driving
|
SoftQNetwork
| false
| 17,878
|
[
"MIT"
] | 4
|
01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
PositionwiseFeedForward
|
import math
import torch
import torch.distributed
import torch
import torch.nn as nn
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
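# Usage sketch (illustrative addition, not from the captured repository): the FFN is
# residual, so the output keeps the input shape; a CPU smoke test with the toy
# shapes above.
if __name__ == '__main__':
    ffn = PositionwiseFeedForward(d_model=4, d_ff=4)
    x, = get_inputs()
    print(ffn(x).shape)  # torch.Size([4, 4, 4, 4])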
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
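# Note (added for clarity): the kernel above fuses the tanh-approximated GELU from
# gelu() below; 0.7978845608028654 ~ sqrt(2 / pi) and 0.044715 is the cubic
# correction term, so tmp13 = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).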
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_tanh_2[grid(256)](buf3, buf4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, primals_4
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
primals_4 = self.w_1.weight
primals_1 = self.w_1.bias
primals_6 = self.w_2.weight
primals_2 = self.w_2.bias
primals_5 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
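# Usage sketch (illustrative addition, not from the captured repository): call()
# contains no dropout ops, so the Triton-backed FFN is expected to behave like the
# reference module in eval mode; the smoke test below assumes a CUDA device.
if __name__ == '__main__' and torch.cuda.is_available():
    ffn = PositionwiseFeedForwardNew(d_model=4, d_ff=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(ffn(x).shape)  # torch.Size([4, 4, 4, 4])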
|
RowitZou/CG-nAR
|
PositionwiseFeedForward
| false
| 17,879
|
[
"MIT"
] | 8
|
8e2debeb3170045592b3b674ea6f9b56251e71f4
|
https://github.com/RowitZou/CG-nAR/tree/8e2debeb3170045592b3b674ea6f9b56251e71f4
|
GlobalAvgPool2d
|
import torch
from torch import nn
import torch.nn.functional as F
class GlobalAvgPool2d(nn.Module):
def __init__(self):
super(GlobalAvgPool2d, self).__init__()
def forward(self, x):
return F.avg_pool2d(x, kernel_size=x.size()[2:])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
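# Usage sketch (illustrative addition, not from the captured repository): pooling
# with a kernel equal to the spatial size is the same as averaging over dims 2 and 3.
if __name__ == '__main__':
    pool = GlobalAvgPool2d()
    x, = get_inputs()
    y = pool(x)
    print(y.shape)  # torch.Size([4, 4, 1, 1])
    print(torch.allclose(y, x.mean(dim=(2, 3), keepdim=True)))  # True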
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class GlobalAvgPool2dNew(nn.Module):
def __init__(self):
super(GlobalAvgPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
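# Hedged sanity-check sketch (an illustrative assumption, not part of the source repo):
# it needs a CUDA device and both classes above in scope, and only covers the fixed
# (4, 4, 4, 4) input shape that the generated call() asserts.
if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    ref = GlobalAvgPool2d()(x)      # eager F.avg_pool2d path
    opt = GlobalAvgPool2dNew()(x)   # fused Triton kernel path
    assert ref.shape == opt.shape == (4, 4, 1, 1)
    assert torch.allclose(ref, opt, atol=1e-6)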
 | Sandy1230/Dive-into-DL-PyTorch-master | GlobalAvgPool2d | false | 17,880 | ["Apache-2.0"] | 4 | eca149f6b706a4e6a7b377707deab22341b014d1 | https://github.com/Sandy1230/Dive-into-DL-PyTorch-master/tree/eca149f6b706a4e6a7b377707deab22341b014d1 |
PolicyNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=
0.003, log_std_min=-20, log_std_max=2):
super(PolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.mean_linear = nn.Linear(hidden_size, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
return mean, log_std
def sample(self, state, scale, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_pi = normal.log_prob(z) - torch.log(scale * (1 - action.pow(2)) +
epsilon)
log_pi = log_pi.sum(1, keepdim=True)
return action, log_pi, mean, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -20.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 2.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf9, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf8, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf5,
primals_9, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del primals_9
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf7, primals_8, primals_6, buf8, primals_4, buf9
class PolicyNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=
0.003, log_std_min=-20, log_std_max=2):
super(PolicyNetworkNew, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.mean_linear = nn.Linear(hidden_size, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def sample(self, state, scale, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_pi = normal.log_prob(z) - torch.log(scale * (1 - action.pow(2)) +
epsilon)
log_pi = log_pi.sum(1, keepdim=True)
return action, log_pi, mean, std
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.mean_linear.weight
primals_7 = self.mean_linear.bias
primals_8 = self.log_std_linear.weight
primals_9 = self.log_std_linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
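# Hedged usage sketch (illustrative assumption, not from the source repo): needs CUDA
# and the toy shapes from get_inputs()/get_init_inputs() above. The compiled forward
# returns (mean, clamped log_std), which sample() consumes unchanged.
if torch.cuda.is_available():
    policy = PolicyNetworkNew(num_inputs=4, num_actions=4).cuda()
    state = torch.rand([4, 4, 4, 4], device='cuda')
    mean, log_std = policy(state)                            # Triton-backed forward
    action, log_pi, _, _ = policy.sample(state, scale=1.0)   # reuses forward()
    assert mean.shape == action.shape == (4, 4, 4, 4)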
 | SAMMiCA/DL_based_E2E_Driving | PolicyNetwork | false | 17,881 | ["MIT"] | 4 | 01f7d74a0db7ed745cf27b9a1ebab0246015ecbd | https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd |
CorrConv
|
from torch.autograd import Function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data
import torch.nn.parallel
class CorrConvFunction(Function):
@staticmethod
def forward(ctx, input, weight, bias=None, stride=1, padding=0, lamda=0.005
):
ctx.save_for_backward(input, weight, bias)
ctx.lamda = lamda
ctx.stride = stride
ctx.padding = padding
output = nn.functional.conv2d(Variable(input), Variable(weight),
bias=Variable(bias), stride=stride, padding=padding)
return output.data
@staticmethod
def backward(ctx, grad_output):
input, weight, bias = ctx.saved_variables
lamda = ctx.lamda
stride = ctx.stride
padding = ctx.padding
HalfFeaIn = int(input.size(1) * 0.5)
BatchSize = input.size(0)
eps = 0.0001
grad_input = grad_weight = grad_bias = None
input1 = input[:, :HalfFeaIn]
input2 = input[:, HalfFeaIn:]
X1 = Variable(input1.data.clone(), requires_grad=True)
X2 = Variable(input2.data.clone(), requires_grad=True)
W1 = weight[:, :HalfFeaIn]
W2 = weight[:, HalfFeaIn:]
net1 = nn.Conv2d(W1.size(1), W1.size(0), [W1.size(2), W1.size(3)],
bias=None, stride=stride, padding=padding)
net2 = nn.Conv2d(W2.size(1), W2.size(0), [W2.size(2), W2.size(3)],
bias=None, stride=stride, padding=padding)
net1.weight.data.copy_(W1.data)
net2.weight.data.copy_(W2.data)
Y1 = net1(X1)
Y2 = net2(X2)
Offset1 = Y1 - torch.mean(Y1, 0, keepdim=True).expand_as(Y1)
Offset2 = Y2 - torch.mean(Y2, 0, keepdim=True).expand_as(Y2)
CrossVar = torch.sum(Offset1 * Offset2, 0, keepdim=True)
AbsVar1 = torch.sum(Offset1, 0, keepdim=True)
AbsVar2 = torch.sum(Offset2, 0, keepdim=True)
Sigma1 = torch.sum(Offset1 ** 2, 0, keepdim=True)
Sigma2 = torch.sum(Offset2 ** 2, 0, keepdim=True)
tmpExp_I = torch.pow(Sigma1 * Sigma2 + eps, -0.5)
tmpExp_II = -0.5 * torch.pow(tmpExp_I, 3)
dCorrdSigma1 = tmpExp_II * Sigma2 * CrossVar
dCorrdSigma2 = tmpExp_II * Sigma1 * CrossVar
dCorrdMu1 = -1 * AbsVar2 * tmpExp_I + -2 * dCorrdSigma1 * AbsVar1
dCorrdMu2 = -1 * AbsVar1 * tmpExp_I + -2 * dCorrdSigma2 * AbsVar2
dCorrdY1 = Offset2 * tmpExp_I.expand_as(Y1) + dCorrdMu1.expand_as(Y1
) / BatchSize + 2 * Offset1 * dCorrdSigma1.expand_as(Y1)
dCorrdY2 = Offset1 * tmpExp_I.expand_as(Y2) + dCorrdMu2.expand_as(Y2
) / BatchSize + 2 * Offset2 * dCorrdSigma2.expand_as(Y2)
Y1.backward(dCorrdY1)
Y2.backward(dCorrdY2)
dCorrdX = torch.cat((X1.grad, X2.grad), 1)
dCorrdW = torch.cat((net1.weight.grad, net2.weight.grad), 1)
net = nn.Conv2d(weight.size(1), weight.size(0), [weight.size(2),
weight.size(3)], stride=stride, padding=padding)
net.weight.data.copy_(weight.data)
net.bias.data.copy_(bias.data)
new_input = Variable(input.data.clone(), requires_grad=True)
output = net(new_input)
output.backward(grad_output)
if ctx.needs_input_grad[0]:
grad_input = new_input.grad - lamda * dCorrdX
if ctx.needs_input_grad[1]:
grad_weight = net.weight.grad - lamda * dCorrdW
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = net.bias.grad
return grad_input, grad_weight, grad_bias, None, None, None
class CorrConv(nn.Module):
def __init__(self, input_features, output_features, kernel_size, stride
=1, padding=0, bias=True, lamda=0.005):
super(CorrConv, self).__init__()
self.input_features = input_features
self.output_features = output_features
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.lamda = lamda
self.weight = nn.Parameter(torch.Tensor(output_features,
input_features, kernel_size, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(output_features))
else:
self.register_parameter('bias', None)
self.weight.data.normal_(0, 0.01)
        if self.bias is not None:
self.bias.data.zero_()
def forward(self, input):
return CorrConvFunction.apply(input, self.weight, self.bias, self.
stride, self.padding, self.lamda)
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(self.
            input_features, self.output_features, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_features': 4, 'output_features': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4,), (1,))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](arg2_1, buf0, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg2_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf1, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 4, 4))
del buf0
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf2
triton_poi_fused_convolution_1[grid(16)](buf3, arg0_1, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
return buf3,
class CorrConvFunction(Function):
@staticmethod
def forward(ctx, input, weight, bias=None, stride=1, padding=0, lamda=0.005
):
ctx.save_for_backward(input, weight, bias)
ctx.lamda = lamda
ctx.stride = stride
ctx.padding = padding
output = nn.functional.conv2d(Variable(input), Variable(weight),
bias=Variable(bias), stride=stride, padding=padding)
return output.data
@staticmethod
def backward(ctx, grad_output):
input, weight, bias = ctx.saved_variables
lamda = ctx.lamda
stride = ctx.stride
padding = ctx.padding
HalfFeaIn = int(input.size(1) * 0.5)
BatchSize = input.size(0)
eps = 0.0001
grad_input = grad_weight = grad_bias = None
input1 = input[:, :HalfFeaIn]
input2 = input[:, HalfFeaIn:]
X1 = Variable(input1.data.clone(), requires_grad=True)
X2 = Variable(input2.data.clone(), requires_grad=True)
W1 = weight[:, :HalfFeaIn]
W2 = weight[:, HalfFeaIn:]
net1 = nn.Conv2d(W1.size(1), W1.size(0), [W1.size(2), W1.size(3)],
bias=None, stride=stride, padding=padding)
net2 = nn.Conv2d(W2.size(1), W2.size(0), [W2.size(2), W2.size(3)],
bias=None, stride=stride, padding=padding)
net1.weight.data.copy_(W1.data)
net2.weight.data.copy_(W2.data)
Y1 = net1(X1)
Y2 = net2(X2)
Offset1 = Y1 - torch.mean(Y1, 0, keepdim=True).expand_as(Y1)
Offset2 = Y2 - torch.mean(Y2, 0, keepdim=True).expand_as(Y2)
CrossVar = torch.sum(Offset1 * Offset2, 0, keepdim=True)
AbsVar1 = torch.sum(Offset1, 0, keepdim=True)
AbsVar2 = torch.sum(Offset2, 0, keepdim=True)
Sigma1 = torch.sum(Offset1 ** 2, 0, keepdim=True)
Sigma2 = torch.sum(Offset2 ** 2, 0, keepdim=True)
tmpExp_I = torch.pow(Sigma1 * Sigma2 + eps, -0.5)
tmpExp_II = -0.5 * torch.pow(tmpExp_I, 3)
dCorrdSigma1 = tmpExp_II * Sigma2 * CrossVar
dCorrdSigma2 = tmpExp_II * Sigma1 * CrossVar
dCorrdMu1 = -1 * AbsVar2 * tmpExp_I + -2 * dCorrdSigma1 * AbsVar1
dCorrdMu2 = -1 * AbsVar1 * tmpExp_I + -2 * dCorrdSigma2 * AbsVar2
dCorrdY1 = Offset2 * tmpExp_I.expand_as(Y1) + dCorrdMu1.expand_as(Y1
) / BatchSize + 2 * Offset1 * dCorrdSigma1.expand_as(Y1)
dCorrdY2 = Offset1 * tmpExp_I.expand_as(Y2) + dCorrdMu2.expand_as(Y2
) / BatchSize + 2 * Offset2 * dCorrdSigma2.expand_as(Y2)
Y1.backward(dCorrdY1)
Y2.backward(dCorrdY2)
dCorrdX = torch.cat((X1.grad, X2.grad), 1)
dCorrdW = torch.cat((net1.weight.grad, net2.weight.grad), 1)
net = nn.Conv2d(weight.size(1), weight.size(0), [weight.size(2),
weight.size(3)], stride=stride, padding=padding)
net.weight.data.copy_(weight.data)
net.bias.data.copy_(bias.data)
new_input = Variable(input.data.clone(), requires_grad=True)
output = net(new_input)
output.backward(grad_output)
if ctx.needs_input_grad[0]:
grad_input = new_input.grad - lamda * dCorrdX
if ctx.needs_input_grad[1]:
grad_weight = net.weight.grad - lamda * dCorrdW
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = net.bias.grad
return grad_input, grad_weight, grad_bias, None, None, None
class CorrConvNew(nn.Module):
def __init__(self, input_features, output_features, kernel_size, stride
=1, padding=0, bias=True, lamda=0.005):
super(CorrConvNew, self).__init__()
self.input_features = input_features
self.output_features = output_features
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.lamda = lamda
self.weight = nn.Parameter(torch.Tensor(output_features,
input_features, kernel_size, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(output_features))
else:
self.register_parameter('bias', None)
self.weight.data.normal_(0, 0.01)
        if self.bias is not None:
self.bias.data.zero_()
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(self.
            input_features, self.output_features, self.bias is not None)
def forward(self, input_0):
arg1_1 = self.weight
arg0_1 = self.bias
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
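# Hedged usage sketch (illustrative assumption, CUDA required): with the toy
# get_init_inputs() configuration, a 4x4 kernel over a 4x4 input gives a 1x1 spatial
# output. Only the forward pass is exercised here.
if torch.cuda.is_available():
    layer = CorrConvNew(input_features=4, output_features=4, kernel_size=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = layer(x)
    assert y.shape == (4, 4, 1, 1)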
 | SCUT-AILab/CorrReg | CorrConv | false | 17,882 | ["MIT"] | 5 | 3635d237effd0c7dd1d2a831f8ab14e30edac561 | https://github.com/SCUT-AILab/CorrReg/tree/3635d237effd0c7dd1d2a831f8ab14e30edac561 |
SELECT_fusion_block
|
import torch
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
class SELECT_fusion_block(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(SELECT_fusion_block, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.select_op = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
self.fusion_conv = nn.Conv2d(in_channels=3 * self.fold,
out_channels=self.fold, kernel_size=1, padding=0, stride=1,
bias=True)
nn.init.constant_(self.fusion_conv.weight, 0)
nn.init.constant_(self.fusion_conv.bias, 0)
def forward(self, x):
"""
:param x: (nt, c, h, w)
:return:(nt, c, h, w)
"""
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
out = torch.zeros_like(x)
out_part = x.view(nt, c, h, w)[:, :self.fold]
out_part_select = self.select_op(out_part)
out_part_select = out_part_select.view(n_batch, self.n_segment,
self.fold, h, w)
out_part = out_part.view(n_batch, self.n_segment, self.fold, h, w)
select_left = torch.zeros_like(out_part_select)
select_right = torch.zeros_like(out_part_select)
select_left[:, 1:] = out_part_select[:, :-1]
select_right[:, :-1] = out_part_select[:, 1:]
out_part = torch.cat([select_left, out_part, select_right], dim=2)
out_part = out_part.view(nt, -1, h, w)
out_part = self.fusion_conv(out_part)
out_part = out_part.view(n_batch, self.n_segment, self.fold, h, w)
out[:, :, :self.fold] = out_part[:, :, :self.fold]
out[:, :, self.fold:] = x[:, :, self.fold:]
out = out.view(nt, c, h, w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_segment': 4, 'n_div': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex % 16
x5 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x4 + 64 * x2), tmp10 & xmask, other=
float('-inf'))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x4 + 64 * x2), tmp16 & xmask, other=
float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x4 + 64 * x2), tmp23 & xmask, other=
float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x4 + 64 * x2), tmp30 & xmask, other=
float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x4 + 64 * x2), tmp33 & xmask, other=float(
'-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x4 + 64 * x2), tmp36 & xmask, other=
float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x4 + 64 * x2), tmp43 & xmask, other=
float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x4 + 64 * x2), tmp46 & xmask, other=
float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x4 + 64 * x2), tmp49 & xmask, other=
float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tl.store(out_ptr0 + x5, tmp51, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x2 = xindex // 48
x0 = xindex % 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x2
tmp6 = tmp5 >= tmp3
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (-16 + x0 + 16 * x2), tmp7 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = 0.0
tmp10 = tl.where(tmp6, tmp8, tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tmp14 = tl.full([1], 2, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp0 >= tmp14
tmp19 = tl.full([1], 3, tl.int64)
tmp21 = tmp5 < tmp19
tmp22 = tmp21 & tmp18
tmp23 = tl.load(in_ptr0 + (16 + x0 + 16 * x2), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.where(tmp21, tmp23, tmp9)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tl.where(tmp16, tmp17, tmp26)
tmp28 = tl.where(tmp4, tmp12, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_copy_zeros_like_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0)
tmp4 = tmp0 < tmp1
tmp5 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = 0.0
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](primals_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 4, 3, 4, 4), (192, 48, 16, 4, 1),
torch.float32)
triton_poi_fused_cat_1[grid(192)](buf0, primals_1, buf1, 192,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 3, 4,
4), (48, 16, 4, 1), 0), primals_2, stride=(1, 1), padding=(0, 0
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1))
buf3 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_copy_zeros_like_2[grid(256)](primals_1, buf2,
primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del primals_1
del primals_3
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 16, 4, 1), 0)
class SELECT_fusion_blockNew(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(SELECT_fusion_blockNew, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.select_op = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
self.fusion_conv = nn.Conv2d(in_channels=3 * self.fold,
out_channels=self.fold, kernel_size=1, padding=0, stride=1,
bias=True)
nn.init.constant_(self.fusion_conv.weight, 0)
nn.init.constant_(self.fusion_conv.bias, 0)
def forward(self, input_0):
primals_2 = self.fusion_conv.weight
primals_3 = self.fusion_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
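# Hedged usage sketch (illustrative assumption, CUDA required): with n_segment=4 the
# (4, 4, 4, 4) input is read as one clip of 4 frames, and the temporal fusion keeps
# the (nt, c, h, w) shape.
if torch.cuda.is_available():
    block = SELECT_fusion_blockNew(in_channels=4, n_segment=4, n_div=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    out = block(x)
    assert out.shape == x.shape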
 | RongchangLi/DEN | SELECT_fusion_block | false | 17,883 | ["MIT"] | 4 | f8b744f96a3a68cf0784080ffd561a5279715727 | https://github.com/RongchangLi/DEN/tree/f8b744f96a3a68cf0784080ffd561a5279715727 |
CONV1d_FusionBlock
|
import torch
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
class CONV1d_FusionBlock(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(CONV1d_FusionBlock, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.temporal_conv = nn.Conv3d(in_channels=2 * self.fold,
out_channels=2 * self.fold, kernel_size=(3, 1, 1), padding=(1,
0, 0), stride=1, bias=True)
nn.init.constant_(self.temporal_conv.weight, 0)
nn.init.constant_(self.temporal_conv.bias, 0)
def forward(self, x):
"""
:param x: (nt, c, h, w)
:return:(nt, c, h, w)
"""
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w).transpose(1, 2)
out_part = x[:, :2 * self.fold]
out_part = self.temporal_conv(out_part)
out = torch.zeros_like(x)
out[:, :2 * self.fold] = out_part
out[:, 2 * self.fold:] = x[:, 2 * self.fold:]
out = out.transpose(1, 2).contiguous().view(nt, c, h, w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_segment': 4, 'n_div': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.optim
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0)
tmp4 = tmp0 < tmp1
tmp5 = tl.load(in_ptr1 + (x0 + 16 * x2 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = 0.0
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tl.where(tmp2, tmp3, tmp11)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 2, 3, 1, 1), (6, 3, 1, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 2, 4, 4, 4), (128, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(128)](primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1, 1),
padding=(1, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 2, 4, 4, 4), (128, 64, 16, 4, 1))
del buf0
buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_1, buf1, primals_3,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(primals_1, (1, 2, 4, 4, 4), (256,
16, 64, 4, 1), 0)
class CONV1d_FusionBlockNew(nn.Module):
def __init__(self, in_channels, n_segment, n_div):
super(CONV1d_FusionBlockNew, self).__init__()
self.n_div = n_div
self.fold = in_channels // n_div
self.n_segment = n_segment
self.temporal_conv = nn.Conv3d(in_channels=2 * self.fold,
out_channels=2 * self.fold, kernel_size=(3, 1, 1), padding=(1,
0, 0), stride=1, bias=True)
nn.init.constant_(self.temporal_conv.weight, 0)
nn.init.constant_(self.temporal_conv.bias, 0)
def forward(self, input_0):
primals_2 = self.temporal_conv.weight
primals_3 = self.temporal_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
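# Hedged usage sketch (illustrative assumption, CUDA required): the (3, 1, 1) temporal
# Conv3d only touches the first 2 * fold channels; the output keeps the input shape.
if torch.cuda.is_available():
    block = CONV1d_FusionBlockNew(in_channels=4, n_segment=4, n_div=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')   # (nt, c, h, w) with nt == n_segment
    out = block(x)
    assert out.shape == x.shape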
 | RongchangLi/DEN | CONV1d_FusionBlock | false | 17,884 | ["MIT"] | 4 | f8b744f96a3a68cf0784080ffd561a5279715727 | https://github.com/RongchangLi/DEN/tree/f8b744f96a3a68cf0784080ffd561a5279715727 |
channel_attention
|
import torch
from torch import nn
class channel_attention(nn.Module):
def __init__(self, in_channels, feature_size):
super(channel_attention, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, 1, bias=False)
self.bias = nn.Parameter(torch.zeros(in_channels))
self.softmax = nn.Softmax()
def forward(self, target_feature):
b, c, h, w = target_feature.shape
target_feature_resize = target_feature.view(b, c, h * w)
c_f = self.fc1(target_feature_resize)
c_f = self.relu1(c_f)
c_f = self.fc2(c_f)
c_f = c_f.view(b, c)
channel_attention_weight = self.softmax(c_f + self.bias)
return channel_attention_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'feature_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1),
0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (4, 1), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_add_2[grid(4)](buf3, primals_4, buf4,
buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_add_3[grid(16)](buf6, primals_4, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del buf5
del primals_4
return buf6, reinterpret_tensor(primals_1, (16, 16), (16, 1), 0
), buf2, buf6, primals_3, buf7
class channel_attentionNew(nn.Module):
def __init__(self, in_channels, feature_size):
super(channel_attentionNew, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, 1, bias=False)
self.bias = nn.Parameter(torch.zeros(in_channels))
self.softmax = nn.Softmax()
def forward(self, input_0):
primals_4 = self.bias
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
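# Hedged sanity-check sketch (illustrative assumption, CUDA required): the module
# returns one softmax-normalised weight per channel, so each row should sum to 1.
if torch.cuda.is_available():
    attn = channel_attentionNew(in_channels=4, feature_size=4).cuda()
    feat = torch.rand([4, 4, 4, 4], device='cuda')   # (b, c, h, w)
    w = attn(feat)
    assert w.shape == (4, 4)
    assert torch.allclose(w.sum(dim=1), torch.ones(4, device='cuda'), atol=1e-5)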
 | SCUT-AILab/AFA | channel_attention | false | 17,885 | ["BSD-3-Clause"] | 7 | acfb42236ce0114d63f22a821fc5954c8c149f45 | https://github.com/SCUT-AILab/AFA/tree/acfb42236ce0114d63f22a821fc5954c8c149f45 |
MLPSoftQNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPSoftQNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003):
super(MLPSoftQNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.linear4 = nn.Linear(hidden_size3, 1)
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 5600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1400
x1 = xindex // 1400
tmp0 = tl.load(in_out_ptr0 + (x0 + 1408 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + 1408 * x1), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1400, 8), (8, 1))
assert_size_stride(primals_4, (1400,), (1,))
assert_size_stride(primals_5, (1024, 1400), (1400, 1))
assert_size_stride(primals_6, (1024,), (1,))
assert_size_stride(primals_7, (256, 1024), (1024, 1))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (1, 256), (256, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 1400), (1408, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 1400), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(5600)](buf2, primals_4, 5600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (1400, 1024),
(1, 1400), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(4096)](buf4, primals_6, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (1024, 256),
(1, 1024), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_relu_3[grid(1024)](buf6, primals_8, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_8
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf8)
del primals_10
return buf8, buf0, buf2, buf4, buf6, primals_9, primals_7, primals_5
class MLPSoftQNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003):
super(MLPSoftQNetworkNew, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.linear4 = nn.Linear(hidden_size3, 1)
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_5 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_9 = self.linear4.weight
primals_10 = self.linear4.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
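# Hedged usage sketch (illustrative assumption, CUDA required): the Q-network
# concatenates [state, action] along dim 1 and maps the result through four linear
# layers to a single Q-value per sample.
if torch.cuda.is_available():
    q_net = MLPSoftQNetworkNew(num_inputs=4, num_actions=4).cuda()
    state = torch.rand([4, 4], device='cuda')
    action = torch.rand([4, 4], device='cuda')
    q = q_net(state, action)
    assert q.shape == (4, 1)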
 | SAMMiCA/DL_based_E2E_Driving | MLPSoftQNetwork | false | 17,886 | ["MIT"] | 4 | 01f7d74a0db7ed745cf27b9a1ebab0246015ecbd | https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd |
Fusion
|
import torch
import torch.nn as nn
class Fusion(nn.Module):
""" Crazy multi-modal fusion: negative squared difference minus relu'd sum
"""
def __init__(self):
super().__init__()
def forward(self, x, y):
return -(x - y) ** 2 + torch.relu(x + y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_neg_pow_relu_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -tmp3
tmp5 = tmp0 + tmp1
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_neg_pow_relu_sub_0[grid(256)](arg0_1, arg1_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FusionNew(nn.Module):
""" Crazy multi-modal fusion: negative squared difference minus relu'd sum
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
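# Hedged sanity-check sketch (illustrative assumption, CUDA required): the fused
# kernel should match the eager expression -(x - y)**2 + relu(x + y) elementwise.
if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = torch.rand([4, 4, 4, 4], device='cuda')
    ref = Fusion()(x, y)       # eager path
    opt = FusionNew()(x, y)    # single fused Triton kernel
    assert torch.allclose(ref, opt, atol=1e-6)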
 | Ruiver/CTCNet | Fusion | false | 17,887 | ["Apache-2.0"] | 6 | 539e55ec9fed06028379d35dfd5cd4074755ffd8 | https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8 |
EncoderBlock
|
import torch
import torch.nn.functional as F
from torch import nn
class EncoderBlock(nn.Module):
"""
Encoder block class
"""
def __init__(self, in_channels, out_channels, k_size, pad_size):
super(EncoderBlock, self).__init__()
self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
def forward(self, x):
x = F.leaky_relu(self.IN1(self.conv1(x)), inplace=True)
x = F.leaky_relu(self.IN2(self.conv2(x)), inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'k_size': 4,
'pad_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 729
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 729 * x0), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tl.where(rmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 729, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = tmp2 - tmp12
tmp20 = 729.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tmp31 = tmp30 > tmp26
tl.store(in_out_ptr0 + (r1 + 729 * x0), tmp2, rmask)
tl.store(out_ptr2 + (r1 + 729 * x0), tmp30, rmask)
tl.store(out_ptr3 + (r1 + 729 * x0), tmp31, rmask)
tl.store(out_ptr4 + x0, tmp24, None)
tl.store(out_ptr0 + x0, tmp12, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 2744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 2744 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r1 + 2744 * x0), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp7 = tl.load(in_out_ptr0 + (r1 + 2744 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp8 = tmp7 - tmp4
tmp9 = 2744.0
tmp10 = tmp5 / tmp9
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tmp13 = libdevice.rsqrt(tmp12)
tmp14 = tmp8 * tmp13
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = 0.01
tmp18 = tmp14 * tmp17
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tmp19 > tmp15
tl.store(out_ptr2 + (r1 + 2744 * x0), tmp19, rmask & xmask)
tl.store(out_ptr3 + (r1 + 2816 * x0), tmp20, rmask & xmask)
tmp21 = 2744.0
tmp22 = tmp5 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + x0, tmp25, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
buf6 = empty_strided_cuda((4, 9, 9, 9), (729, 81, 9, 1), torch.float32)
buf15 = empty_strided_cuda((4, 9, 9, 9), (729, 81, 9, 1), torch.bool)
buf5 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_0[
grid(4)](buf1, primals_2, buf2, buf6, buf15, buf5, 4, 729,
num_warps=8, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 9,
9, 9), (0, 729, 81, 9, 1), 0), primals_4, stride=(1, 1, 1),
padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 14, 14, 14), (10976, 2744, 196, 14, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
buf13 = empty_strided_cuda((4, 14, 14, 14), (2744, 196, 14, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 14, 14, 14), (2816, 196, 14, 1),
torch.bool)
buf12 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1[
grid(4)](buf8, primals_5, buf9, buf13, buf14, buf12, 4, 2744,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_5
return buf13, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, reinterpret_tensor(buf5, (
4,), (1,), 0), reinterpret_tensor(buf6, (1, 4, 9, 9, 9), (2916, 729,
81, 9, 1), 0), buf8, reinterpret_tensor(buf12, (4,), (1,), 0
), buf14, reinterpret_tensor(buf9, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0
), buf15, reinterpret_tensor(buf2, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0)
class EncoderBlockNew(nn.Module):
"""
Encoder block class
"""
def __init__(self, in_channels, out_channels, k_size, pad_size):
super(EncoderBlockNew, self).__init__()
self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
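# Hedged usage sketch (illustrative assumption, CUDA required): the generated call()
# treats the (4, 4, 4, 4) input as one 4-channel 4x4x4 volume; with kernel 4 and
# padding 4 each conv enlarges the spatial extent (4 -> 9 -> 14).
if torch.cuda.is_available():
    enc = EncoderBlockNew(in_channels=4, out_channels=4, k_size=4, pad_size=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    out = enc(x)
    assert out.shape == (4, 14, 14, 14)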
 | SVRTK/Segmentation_FetalMRI | EncoderBlock | false | 17,888 | ["Apache-2.0"] | 6 | 9344a2248cbe8e4cccbe05ca98214626dcf62805 | https://github.com/SVRTK/Segmentation_FetalMRI/tree/9344a2248cbe8e4cccbe05ca98214626dcf62805 |
pixel_attention
|
import torch
from torch import nn
class pixel_attention(nn.Module):
def __init__(self, in_channels, feature_size):
super(pixel_attention, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, feature_size * feature_size,
bias=True)
self.softmax = nn.Softmax()
def forward(self, target_feature):
b, c, h, w = target_feature.shape
target_feature_resize = target_feature.view(b, c, h * w)
p_f = torch.mean(target_feature_resize, dim=1)
p_f = self.fc1(p_f)
p_f = self.relu1(p_f)
p_f = self.fc2(p_f)
p_f = p_f.view(b, h * w)
pixel_attention_weight = self.softmax(p_f)
pixel_attention_weight = pixel_attention_weight.reshape(b, 1, h * w)
return pixel_attention_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'feature_size': 4}]
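# --- Editor's note: hedged usage sketch, not part of the original repository. ---
# pixel_attention averages over channels, runs a small bottleneck MLP over the h*w
# positions, and softmax-normalises the result into per-pixel attention weights.
if __name__ == '__main__':
    pa = pixel_attention(in_channels=4, feature_size=4)
    w = pa(torch.rand([4, 4, 4, 4]))
    print(w.shape)  # torch.Size([4, 1, 16]); each of the 4 rows sums to 1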
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (16, 4), (1,
16), 0), out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3,
(4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_4
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused__softmax_2[grid(4)](buf3, buf6, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
return reinterpret_tensor(buf6, (4, 1, 16), (16, 16, 1), 0
), buf0, buf2, buf6, primals_3
class pixel_attentionNew(nn.Module):
def __init__(self, in_channels, feature_size):
super(pixel_attentionNew, self).__init__()
self.fc1 = nn.Linear(feature_size * feature_size, feature_size,
bias=False)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(feature_size, feature_size * feature_size,
bias=True)
self.softmax = nn.Softmax()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_4 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
SCUT-AILab/AFA
|
pixel_attention
| false
| 17,889
|
[
"BSD-3-Clause"
] | 7
|
acfb42236ce0114d63f22a821fc5954c8c149f45
|
https://github.com/SCUT-AILab/AFA/tree/acfb42236ce0114d63f22a821fc5954c8c149f45
|
QREmbeddingBag
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class QREmbeddingBag(nn.Module):
"""Computes sums or means over two 'bags' of embeddings, one using the quotient
of the indices and the other using the remainder of the indices, without
instantiating the intermediate embeddings, then performs an operation to combine these.
For bags of constant length and no :attr:`per_sample_weights`, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
QREmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Known Issues:
Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.
Args:
num_categories (int): total number of unique categories. The input indices must be in
0, 1, ..., num_categories - 1.
embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
or ``"mult"`` operation are used, these embedding dimensions must be
the same. If a single embedding_dim is used, then it will use this
embedding_dim for both embedding tables.
num_collisions (int): number of collisions to enforce.
operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
to compose embeddings. ``"concat"`` concatenates the embeddings,
``"add"`` sums the embeddings, and ``"mult"`` multiplies
(component-wise) the embeddings.
Default: ``"mult"``
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
Attributes:
weight (Tensor): the learnable weights of each embedding table of the module, of shape
`(num_embeddings, embedding_dim)`, initialized using a uniform distribution
with sqrt(1 / num_categories).
Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
:attr:`per_index_weights` (Tensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`,
it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
:attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`,
it will be treated as a concatenation of multiple bags (sequences).
:attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore,
for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
having ``B`` bags. Empty bags (i.e., having 0-length) will have
returned vectors filled by zeros.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Output shape: `(B, embedding_dim)`
"""
__constants__ = ['num_categories', 'embedding_dim', 'num_collisions',
'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'mode',
'sparse']
def __init__(self, num_categories, embedding_dim, num_collisions,
operation='mult', max_norm=None, norm_type=2.0, scale_grad_by_freq=
False, mode='mean', sparse=False, _weight=None):
super(QREmbeddingBag, self).__init__()
assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
self.num_categories = num_categories
if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
self.embedding_dim = [embedding_dim, embedding_dim]
else:
self.embedding_dim = embedding_dim
self.num_collisions = num_collisions
self.operation = operation
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if self.operation == 'add' or self.operation == 'mult':
assert self.embedding_dim[0] == self.embedding_dim[1
], 'Embedding dimensions do not match!'
self.num_embeddings = [int(np.ceil(num_categories / num_collisions)
), num_collisions]
if _weight is None:
self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0],
self.embedding_dim[0]))
self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1],
self.embedding_dim[1]))
self.reset_parameters()
else:
assert list(_weight[0].shape) == [self.num_embeddings[0], self.
embedding_dim[0]
], 'Shape of weight for quotient table does not match num_embeddings and embedding_dim'
assert list(_weight[1].shape) == [self.num_embeddings[1], self.
embedding_dim[1]
], 'Shape of weight for remainder table does not match num_embeddings and embedding_dim'
self.weight_q = Parameter(_weight[0])
self.weight_r = Parameter(_weight[1])
self.mode = mode
self.sparse = sparse
def reset_parameters(self):
nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))
def forward(self, input, offsets=None, per_sample_weights=None):
input_q = (input / self.num_collisions).long()
input_r = torch.remainder(input, self.num_collisions).long()
embed_q = F.embedding_bag(input_q, self.weight_q, offsets, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.mode,
self.sparse, per_sample_weights)
embed_r = F.embedding_bag(input_r, self.weight_r, offsets, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.mode,
self.sparse, per_sample_weights)
if self.operation == 'concat':
embed = torch.cat((embed_q, embed_r), dim=1)
elif self.operation == 'add':
embed = embed_q + embed_r
elif self.operation == 'mult':
embed = embed_q * embed_r
return embed
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
return s.format(**self.__dict__)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_categories': 4, 'embedding_dim': 4, 'num_collisions': 4}]
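# --- Editor's note: hedged worked example, not part of the original repository. ---
# The quotient/remainder ("QR") trick maps every category id onto two much smaller
# tables. With num_categories=4 and num_collisions=4, ids 0..3 all share quotient 0
# and are distinguished only by their remainders.
if __name__ == '__main__':
    bag = QREmbeddingBag(num_categories=4, embedding_dim=4, num_collisions=4)
    ids = torch.tensor([[0, 1, 2, 3]])       # one bag with four category ids
    print((ids / 4).long())                  # quotient indices  -> tensor([[0, 0, 0, 0]])
    print(torch.remainder(ids, 4))           # remainder indices -> tensor([[0, 1, 2, 3]])
    print(bag(ids).shape)                    # torch.Size([1, 4]) with the default 'mult' op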
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 4 * x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_div_remainder_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = 4.0
tmp5 = tmp0 % tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = tmp5 != tmp6
tmp8 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0
tmp9 = libdevice.signbit(tmp4) if tmp4.dtype is tl.float32 else tmp4 < 0
tmp10 = tmp8 != tmp9
tmp11 = tmp7 & tmp10
tmp12 = tmp5 + tmp4
tmp13 = tl.where(tmp11, tmp12, tmp5)
tmp14 = tmp13.to(tl.int64)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused__to_copy_div_remainder_1[grid(16)](primals_1, buf1,
buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf2 = torch.ops.aten._embedding_bag.default(primals_2,
reinterpret_tensor(buf1, (16,), (1,), 0), buf0, False, 1)
del primals_2
buf3 = buf2[0]
buf4 = buf2[1]
buf5 = buf2[2]
buf6 = buf2[3]
del buf2
buf8 = torch.ops.aten._embedding_bag.default(primals_3,
reinterpret_tensor(buf7, (16,), (1,), 0), buf0, False, 1)
del primals_3
buf9 = buf8[0]
buf10 = buf8[1]
buf11 = buf8[2]
buf12 = buf8[3]
del buf8
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](buf3, buf9, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf13, buf0, reinterpret_tensor(buf1, (16,), (1,), 0
), buf3, buf4, buf5, buf6, reinterpret_tensor(buf7, (16,), (1,), 0
), buf9, buf10, buf11, buf12
class QREmbeddingBagNew(nn.Module):
"""Computes sums or means over two 'bags' of embeddings, one using the quotient
of the indices and the other using the remainder of the indices, without
instantiating the intermediate embeddings, then performs an operation to combine these.
For bags of constant length and no :attr:`per_sample_weights`, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
QREmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Known Issues:
Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.
Args:
num_categories (int): total number of unique categories. The input indices must be in
0, 1, ..., num_categories - 1.
embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
or ``"mult"`` operation are used, these embedding dimensions must be
the same. If a single embedding_dim is used, then it will use this
embedding_dim for both embedding tables.
num_collisions (int): number of collisions to enforce.
operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
to compose embeddings. ``"concat"`` concatenates the embeddings,
``"add"`` sums the embeddings, and ``"mult"`` multiplies
(component-wise) the embeddings.
Default: ``"mult"``
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
Attributes:
weight (Tensor): the learnable weights of each embedding table of the module, of shape
`(num_embeddings, embedding_dim)`, initialized using a uniform distribution
with sqrt(1 / num_categories).
Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
:attr:`per_index_weights` (Tensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`,
it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
:attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`,
it will be treated as a concatenation of multiple bags (sequences).
:attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore,
for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
having ``B`` bags. Empty bags (i.e., having 0-length) will have
returned vectors filled by zeros.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Output shape: `(B, embedding_dim)`
"""
__constants__ = ['num_categories', 'embedding_dim', 'num_collisions',
'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'mode',
'sparse']
def __init__(self, num_categories, embedding_dim, num_collisions,
operation='mult', max_norm=None, norm_type=2.0, scale_grad_by_freq=
False, mode='mean', sparse=False, _weight=None):
super(QREmbeddingBagNew, self).__init__()
assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
self.num_categories = num_categories
if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
self.embedding_dim = [embedding_dim, embedding_dim]
else:
self.embedding_dim = embedding_dim
self.num_collisions = num_collisions
self.operation = operation
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if self.operation == 'add' or self.operation == 'mult':
assert self.embedding_dim[0] == self.embedding_dim[1
], 'Embedding dimensions do not match!'
self.num_embeddings = [int(np.ceil(num_categories / num_collisions)
), num_collisions]
if _weight is None:
self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0],
self.embedding_dim[0]))
self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1],
self.embedding_dim[1]))
self.reset_parameters()
else:
assert list(_weight[0].shape) == [self.num_embeddings[0], self.
embedding_dim[0]
], 'Shape of weight for quotient table does not match num_embeddings and embedding_dim'
assert list(_weight[1].shape) == [self.num_embeddings[1], self.
embedding_dim[1]
], 'Shape of weight for remainder table does not match num_embeddings and embedding_dim'
self.weight_q = Parameter(_weight[0])
self.weight_r = Parameter(_weight[1])
self.mode = mode
self.sparse = sparse
def reset_parameters(self):
nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
return s.format(**self.__dict__)
def forward(self, input_0):
primals_2 = self.weight_q
primals_1 = self.weight_r
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
STAR-Laboratory/Accelerating-RecSys-Training
|
QREmbeddingBag
| false
| 17,890
|
[
"MIT"
] | 5
|
e43cae6fd543813b352b01510e846febd67944ad
|
https://github.com/STAR-Laboratory/Accelerating-RecSys-Training/tree/e43cae6fd543813b352b01510e846febd67944ad
|
Classifier
|
import torch
import torch.nn as nn
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class Classifier(nn.Sequential):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(Classifier, self).__init__()
self.lin1 = FCNet(in_features, mid_features, activate='relu', drop=drop
)
self.lin2 = FCNet(mid_features, out_features, drop=drop)
self.bilinear = nn.Bilinear(in1_features=in_features, in2_features=
in_features, out_features=mid_features)
def forward(self, v, q):
"""
:param v: [batch, r1, features]
:param q: [batch, r2, features]
:return:
"""
num_obj = v.shape[2]
max_len = q.shape[2]
v_mean = v.sum(1) / num_obj
q_mean = q.sum(1) / max_len
out = self.lin1(v_mean * q_mean)
out = self.bilinear(v_mean, q_mean)
out = self.lin2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'mid_features': 4, 'out_features': 4}]
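# --- Editor's note: hedged usage sketch, not part of the original repository. ---
# v and q are pooled over dim 1 (a sum scaled by shape[2]), fused with nn.Bilinear, and
# projected by lin2. Note that the lin1(v_mean * q_mean) result in forward() is
# immediately overwritten by the bilinear output, so only the bilinear path reaches lin2.
if __name__ == '__main__':
    clf = Classifier(in_features=4, mid_features=4, out_features=4)
    out = clf(torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]))
    print(out.shape)  # torch.Size([4, 4, 4])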
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sum_0[grid(64)](primals_2, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (
16, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf1, (16, 4),
(4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_5
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
triton_poi_fused_add_1[grid(64)](buf4, primals_6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_8
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
buf1, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1), 0
), primals_7
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class ClassifierNew(nn.Sequential):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(ClassifierNew, self).__init__()
self.lin1 = FCNet(in_features, mid_features, activate='relu', drop=drop
)
self.lin2 = FCNet(mid_features, out_features, drop=drop)
self.bilinear = nn.Bilinear(in1_features=in_features, in2_features=
in_features, out_features=mid_features)
def forward(self, input_0, input_1):
primals_3 = self.lin1.lin.weight
primals_4 = self.lin1.lin.bias
primals_7 = self.lin2.lin.weight
primals_6 = self.lin2.lin.bias
primals_5 = self.bilinear.weight
primals_8 = self.bilinear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
Ruiver/CTCNet
|
Classifier
| false
| 17,891
|
[
"Apache-2.0"
] | 6
|
539e55ec9fed06028379d35dfd5cd4074755ffd8
|
https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8
|
MLPPolicyNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
class MLPPolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003, log_std_min=-20,
log_std_max=2):
super(MLPPolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.num_inputs = num_inputs
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.mean_linear = nn.Linear(hidden_size3, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size3, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
return mean, log_std
def sample(self, state, scale, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_pi = normal.log_prob(z) - torch.log(scale * (1 - action.pow(2)) +
epsilon)
log_pi = log_pi.sum(1, keepdim=True)
return action, log_pi, mean, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4}]
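# --- Editor's note: hedged usage sketch, not part of the original repository. ---
# forward() returns the action mean and a log-std clamped to [log_std_min, log_std_max];
# sample() draws a tanh-squashed action plus its log-probability. The scale argument is
# whatever action scaling the caller applies; torch.ones(4) is just a placeholder here.
if __name__ == '__main__':
    policy = MLPPolicyNetwork(num_inputs=4, num_actions=4)
    state = torch.rand([4, 4])
    mean, log_std = policy(state)
    action, log_pi, mean, std = policy.sample(state, scale=torch.ones(4))
    print(mean.shape, log_std.shape, action.shape, log_pi.shape)
    # torch.Size([4, 4]) torch.Size([4, 4]) torch.Size([4, 4]) torch.Size([4, 1])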
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 89600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1400
x1 = xindex // 1400
tmp0 = tl.load(in_out_ptr0 + (x0 + 1408 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 1408 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 1408 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -20.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 2.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (1400, 4), (4, 1))
assert_size_stride(primals_2, (1400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1024, 1400), (1400, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (256, 1024), (1024, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 256), (256, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1400), (1408, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1400), (22528, 5632, 1408,
1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 1400), (22528, 5632, 1408, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(89600)](buf1,
primals_2, buf12, 89600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1400), (1408, 1), 0
), reinterpret_tensor(primals_4, (1400, 1024), (1, 1400), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096, 1024,
1), 0)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(65536)](buf3,
primals_5, buf11, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), reinterpret_tensor(primals_6, (1024, 256), (1, 1024), 0),
out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf4
buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16384)](buf5,
primals_7, buf10, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_10, (256, 4), (1, 256), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_3[grid(256)](buf7,
primals_11, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_11
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1400), (1408, 1), 0
), reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), reinterpret_tensor(buf5, (64, 256), (256, 1), 0
), buf9, primals_10, primals_8, buf10, primals_6, buf11, primals_4, buf12
class MLPPolicyNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size1=1400,
hidden_size2=1024, hidden_size3=256, init_w=0.003, log_std_min=-20,
log_std_max=2):
super(MLPPolicyNetworkNew, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.num_inputs = num_inputs
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size1)
self.linear2 = nn.Linear(hidden_size1, hidden_size2)
self.linear3 = nn.Linear(hidden_size2, hidden_size3)
self.mean_linear = nn.Linear(hidden_size3, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size3, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def sample(self, state, scale, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_pi = normal.log_prob(z) - torch.log(scale * (1 - action.pow(2)) +
epsilon)
log_pi = log_pi.sum(1, keepdim=True)
return action, log_pi, mean, std
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.mean_linear.weight
primals_9 = self.mean_linear.bias
primals_10 = self.log_std_linear.weight
primals_11 = self.log_std_linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
SAMMiCA/DL_based_E2E_Driving
|
MLPPolicyNetwork
| false
| 17,892
|
[
"MIT"
] | 4
|
01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
https://github.com/SAMMiCA/DL_based_E2E_Driving/tree/01f7d74a0db7ed745cf27b9a1ebab0246015ecbd
|
HardWeightedSum
|
import torch
from torch import nn
class HardWeightedSum(nn.Module):
def __init__(self, op_number=2, act=nn.ReLU, eps=0.0001):
super(HardWeightedSum, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
self.act = act()
self.eps = eps
def forward(self, x):
weights_num = self.act(self.weights)
weights_denom = torch.sum(weights_num) + self.eps
return torch.sum(weights_num * x / weights_denom, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
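# --- Editor's note: hedged equivalence check, not part of the original repository. ---
# The module forms a ReLU-rectified, eps-stabilised weighted combination over the leading
# op dimension: sum_i relu(w_i) * x_i / (sum_j relu(w_j) + eps).
if __name__ == '__main__':
    hws = HardWeightedSum(op_number=2)
    x = torch.rand([4, 4, 4, 4])            # broadcast against the (2, 1, 1, 1, 1) weights
    w = torch.relu(hws.weights)
    ref = (w * x / (w.sum() + hws.eps)).sum(dim=0)
    print(torch.allclose(hws(x), ref))      # True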
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_relu_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_add_div_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp5 = tmp3 * tmp4
tmp8 = 0.0001
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp13 = triton_helpers.maximum(tmp2, tmp12)
tmp14 = tmp13 * tmp4
tmp15 = tmp14 / tmp9
tmp16 = tmp10 + tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_relu_sum_0[grid(1)](primals_1, buf0, 1, 2, XBLOCK=
1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_relu_sum_1[grid(256)](primals_1,
primals_2, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
return buf1, primals_1, primals_2
class HardWeightedSumNew(nn.Module):
def __init__(self, op_number=2, act=nn.ReLU, eps=0.0001):
super(HardWeightedSumNew, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
self.act = act()
self.eps = eps
def forward(self, input_0):
primals_1 = self.weights
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Senyaaa/detection-experiments
|
HardWeightedSum
| false
| 17,893
|
[
"Apache-2.0"
] | 5
|
5e80dd458e886ca27db5420d25ade8f9d74ae5a8
|
https://github.com/Senyaaa/detection-experiments/tree/5e80dd458e886ca27db5420d25ade8f9d74ae5a8
|
DecoderBlock
|
import torch
from functools import partial
import torch.nn.functional as F
from torch import nn
class DecoderBlock(nn.Module):
"""
Decoder block class
"""
def __init__(self, in_channels, middle_channels, out_channels, k_size,
pad_size):
super(DecoderBlock, self).__init__()
self.conv1 = nn.Conv3d(in_channels, middle_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(middle_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
self.upsample = partial(F.interpolate, scale_factor=2, mode=
'trilinear', align_corners=False)
def forward(self, x):
x = F.leaky_relu(self.IN1(self.conv1(x)), inplace=True)
x = F.leaky_relu(self.IN2(self.conv2(x)), inplace=True)
x = self.upsample(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'middle_channels': 4, 'out_channels': 4,
'k_size': 4, 'pad_size': 4}]
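# --- Editor's note: hedged usage sketch, not part of the original repository. ---
# Two padded 3D convolutions with InstanceNorm + leaky ReLU, followed by a 2x trilinear
# upsample: a 4^3 input grows spatially 4 -> 9 -> 14 and is then doubled to 28.
if __name__ == '__main__':
    dec = DecoderBlock(in_channels=4, middle_channels=4, out_channels=4, k_size=4, pad_size=4)
    y = dec(torch.rand([4, 4, 4, 4, 4]))
    print(y.shape)  # torch.Size([4, 4, 28, 28, 28])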
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from functools import partial
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 729
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 729 * x3), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tl.where(rmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 729, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = tmp2 - tmp12
tmp20 = 729.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(in_out_ptr0 + (r2 + 729 * x3), tmp2, rmask)
tl.store(out_ptr2 + (r2 + 729 * x3), tmp30, rmask)
tl.store(out_ptr3 + x3, tmp24, None)
tl.store(out_ptr0 + x3, tmp12, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 16
rnumel = 2744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 4
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 2744 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 2744 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tmp7 = 2744.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 13, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_5(in_out_ptr2, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK: tl.constexpr):
xnumel = 351232
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 784 % 28
x1 = xindex // 28 % 28
x0 = xindex % 28
x3 = xindex // 21952
x5 = xindex
tmp0 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + x2, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr9 + x0, xmask, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr11 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 14, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp10 = tmp9 + tmp1
tmp11 = tmp9 < 0
tmp12 = tl.where(tmp11, tmp10, tmp9)
tmp13 = tl.load(in_ptr3 + (tmp12 + 14 * tmp8 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp15 = tmp13 - tmp14
tmp17 = tmp15 * tmp16
tmp18 = 0.0
tmp19 = tmp17 > tmp18
tmp20 = 0.01
tmp21 = tmp17 * tmp20
tmp22 = tl.where(tmp19, tmp17, tmp21)
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr3 + (tmp12 + 14 * tmp8 + 196 * tmp26 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp28 = tmp27 - tmp14
tmp29 = tmp28 * tmp16
tmp30 = tmp29 > tmp18
tmp31 = tmp29 * tmp20
tmp32 = tl.where(tmp30, tmp29, tmp31)
tmp34 = tmp33 + tmp1
tmp35 = tmp33 < 0
tmp36 = tl.where(tmp35, tmp34, tmp33)
tmp37 = tl.load(in_ptr3 + (tmp12 + 14 * tmp36 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp38 = tmp37 - tmp14
tmp39 = tmp38 * tmp16
tmp40 = tmp39 > tmp18
tmp41 = tmp39 * tmp20
tmp42 = tl.where(tmp40, tmp39, tmp41)
tmp43 = tl.load(in_ptr3 + (tmp12 + 14 * tmp36 + 196 * tmp26 + 2744 * x3
), xmask, eviction_policy='evict_last')
tmp44 = tmp43 - tmp14
tmp45 = tmp44 * tmp16
tmp46 = tmp45 > tmp18
tmp47 = tmp45 * tmp20
tmp48 = tl.where(tmp46, tmp45, tmp47)
tmp50 = tmp49 + tmp1
tmp51 = tmp49 < 0
tmp52 = tl.where(tmp51, tmp50, tmp49)
tmp53 = tl.load(in_ptr3 + (tmp52 + 14 * tmp36 + 196 * tmp26 + 2744 * x3
), xmask, eviction_policy='evict_last')
tmp54 = tmp53 - tmp14
tmp55 = tmp54 * tmp16
tmp56 = tmp55 > tmp18
tmp57 = tmp55 * tmp20
tmp58 = tl.where(tmp56, tmp55, tmp57)
tmp59 = tmp58 - tmp48
tmp61 = tmp59 * tmp60
tmp62 = tmp48 + tmp61
tmp63 = tl.load(in_ptr3 + (tmp52 + 14 * tmp36 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp64 = tmp63 - tmp14
tmp65 = tmp64 * tmp16
tmp66 = tmp65 > tmp18
tmp67 = tmp65 * tmp20
tmp68 = tl.where(tmp66, tmp65, tmp67)
tmp69 = tmp68 - tmp42
tmp70 = tmp69 * tmp60
tmp71 = tmp42 + tmp70
tmp72 = tl.load(in_ptr3 + (tmp52 + 14 * tmp8 + 196 * tmp4 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp73 = tmp72 - tmp14
tmp74 = tmp73 * tmp16
tmp75 = tmp74 > tmp18
tmp76 = tmp74 * tmp20
tmp77 = tl.where(tmp75, tmp74, tmp76)
tmp78 = tmp77 - tmp22
tmp79 = tmp78 * tmp60
tmp80 = tmp22 + tmp79
tmp81 = tmp80 - tmp71
tmp82 = tl.load(in_ptr3 + (tmp52 + 14 * tmp8 + 196 * tmp26 + 2744 * x3),
xmask, eviction_policy='evict_last')
tmp83 = tmp82 - tmp14
tmp84 = tmp83 * tmp16
tmp85 = tmp84 > tmp18
tmp86 = tmp84 * tmp20
tmp87 = tl.where(tmp85, tmp84, tmp86)
tmp88 = tmp87 - tmp32
tmp89 = tmp88 * tmp60
tmp90 = tmp32 + tmp89
tmp91 = tmp90 - tmp62
tmp93 = tmp81 * tmp92
tmp94 = tmp71 + tmp93
tmp95 = tmp91 * tmp92
tmp96 = tmp62 + tmp95
tmp97 = tmp96 - tmp94
tmp99 = tmp97 * tmp98
tmp100 = tmp94 + tmp99
tl.store(in_out_ptr2 + x5, tmp100, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16),
torch.float32)
buf6 = empty_strided_cuda((4, 4, 9, 9, 9), (2916, 729, 81, 9, 1),
torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16),
torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_0[grid
(16)](buf1, primals_2, buf2, buf6, buf5, 16, 729, num_warps=8,
num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1, 1),
padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 14, 14, 14), (10976, 2744, 196, 14, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch
.float32)
buf10 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16),
torch.float32)
buf12 = reinterpret_tensor(buf10, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0
)
del buf10
triton_red_fused__native_batch_norm_legit_convolution_1[grid(16)](buf8,
buf12, primals_5, buf9, 16, 2744, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del primals_5
buf13 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.int64)
triton_poi_fused__to_copy_2[grid(28)](buf13, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.int64)
triton_poi_fused_add_clamp_3[grid(28)](buf14, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((28, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_2[grid(28)](buf15, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((28, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_3[grid(28)](buf16, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((28,), (1,), torch.int64)
triton_poi_fused__to_copy_2[grid(28)](buf17, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((28,), (1,), torch.int64)
triton_poi_fused_add_clamp_3[grid(28)](buf18, 28, XBLOCK=32,
num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((28,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4[grid(28)](buf23,
28, XBLOCK=32, num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((28, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4[grid(28)](buf26,
28, XBLOCK=32, num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((28, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_4[grid(28)](buf29,
28, XBLOCK=32, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 28, 28, 28), (87808, 21952, 784,
28, 1), torch.float32)
buf27 = buf20
del buf20
buf30 = buf27
del buf27
triton_poi_fused__unsafe_index_add_mul_sub_5[grid(351232)](buf30,
buf13, buf16, buf17, buf8, buf9, buf12, buf14, buf15, buf18,
buf23, buf26, buf29, 351232, XBLOCK=512, num_warps=8, num_stages=1)
return (buf30, primals_1, primals_3, primals_4, buf1,
reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf8, buf9, buf12,
buf13, buf14, buf15, buf16, buf17, buf18, buf23, buf26, buf29,
reinterpret_tensor(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0))
class DecoderBlockNew(nn.Module):
"""
Decoder block class
"""
def __init__(self, in_channels, middle_channels, out_channels, k_size,
pad_size):
super(DecoderBlockNew, self).__init__()
self.conv1 = nn.Conv3d(in_channels, middle_channels, kernel_size=
k_size, padding=pad_size)
self.conv2 = nn.Conv3d(middle_channels, out_channels, kernel_size=
k_size, padding=pad_size)
self.IN1 = nn.InstanceNorm3d(out_channels)
self.IN2 = nn.InstanceNorm3d(out_channels)
self.upsample = partial(F.interpolate, scale_factor=2, mode=
'trilinear', align_corners=False)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = self.conv2.weight
primals_5 = self.conv2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
SVRTK/Segmentation_FetalMRI
|
DecoderBlock
| false
| 17,894
|
[
"Apache-2.0"
] | 6
|
9344a2248cbe8e4cccbe05ca98214626dcf62805
|
https://github.com/SVRTK/Segmentation_FetalMRI/tree/9344a2248cbe8e4cccbe05ca98214626dcf62805
|
SoftMaxWeightedSum
|
import torch
from torch import nn
class SoftMaxWeightedSum(nn.Module):
def __init__(self, op_number=2):
super(SoftMaxWeightedSum, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
def forward(self, x):
return torch.sum(torch.softmax(self.weights, dim=0) * x, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
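A minimal usage sketch, assuming torch is imported and the SoftMaxWeightedSum class defined above is in scope; it only illustrates the leading "op" dimension that the learned softmax weights are broadcast over and reduced along (shapes here are hypothetical, not taken from the repository):
# blend two same-shaped feature maps with learned softmax weights
fusion = SoftMaxWeightedSum(op_number=2)
feat_a = torch.rand(4, 4, 4, 4)
feat_b = torch.rand(4, 4, 4, 4)
stacked = torch.stack([feat_a, feat_b], dim=0)  # shape (2, 4, 4, 4, 4)
fused = fusion(stacked)                         # shape (4, 4, 4, 4)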
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x0, xmask)
tmp11 = tl.load(in_ptr0 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp4 = tmp1 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp8 = tmp5 / tmp7
tmp10 = tmp8 * tmp9
tmp13 = tmp12 - tmp3
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp7
tmp16 = tmp15 * tmp9
tmp17 = tmp10 + tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.
float32)
buf1 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.
float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(1)](primals_1, buf0, buf1, 1, 2,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_1[grid(256)](primals_1, buf0,
buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
return buf2, primals_1, primals_2
class SoftMaxWeightedSumNew(nn.Module):
def __init__(self, op_number=2):
super(SoftMaxWeightedSumNew, self).__init__()
shape = op_number, 1, 1, 1, 1
self.weights = nn.Parameter(torch.ones(shape), requires_grad=True)
def forward(self, input_0):
primals_1 = self.weights
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Senyaaa/detection-experiments
|
SoftMaxWeightedSum
| false
| 17,895
|
[
"Apache-2.0"
] | 5
|
5e80dd458e886ca27db5420d25ade8f9d74ae5a8
|
https://github.com/Senyaaa/detection-experiments/tree/5e80dd458e886ca27db5420d25ade8f9d74ae5a8
|
Decoder
|
import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, n_features, n_modes, T):
super(Decoder, self).__init__()
self.n_modes = n_modes
self.T = T
self.linear1 = nn.Linear(n_features, 4096)
self.linear2 = nn.Linear(512, n_modes * T * 2)
self.linear3 = nn.Linear(512, n_modes)
self.linear4 = nn.Linear(4096, 512)
self.softmax = nn.Softmax(dim=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear1(x)
x = self.relu(x)
x = self.linear4(x)
x = self.relu(x)
pos = self.linear2(x)
prob = self.linear3(x)
prob = torch.sum(prob, axis=0)
prob = prob - torch.max(prob)
prob = self.softmax(prob)
return pos.view((-1, self.n_modes, self.T, 2)), prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'n_modes': 4, 'T': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2(in_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = triton_helpers.max2(tmp7, 1)[:, None]
tmp10 = tmp6 - tmp9
tmp11 = tmp6 == tmp9
tmp12 = libdevice.isnan(tmp6).to(tl.int1)
tmp13 = libdevice.isnan(tmp9).to(tl.int1)
tmp14 = tmp12 & tmp13
tmp15 = tmp11 | tmp14
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp10, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp15, None)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 4096), (4096, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (32, 512), (512, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (4, 512), (512, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(262144)](buf1,
primals_2, buf12, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0
), reinterpret_tensor(primals_4, (4096, 512), (1, 4096), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(32768)](buf3,
primals_5, buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512),
(512, 1), 0), reinterpret_tensor(primals_6, (512, 32), (1, 512),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 512),
(512, 1), 0), reinterpret_tensor(primals_8, (512, 4), (1, 512),
0), alpha=1, beta=1, out=buf5)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_per_fused_eq_isnan_logical_and_logical_or_max_sub_sum_2[grid(1)
](buf5, buf7, buf10, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
return reinterpret_tensor(buf4, (64, 4, 4, 2), (32, 8, 2, 1), 0
), buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0
), reinterpret_tensor(buf3, (64, 512), (512, 1), 0
), buf9, buf10, primals_8, primals_6, buf11, primals_4, buf12
class DecoderNew(nn.Module):
def __init__(self, n_features, n_modes, T):
super(DecoderNew, self).__init__()
self.n_modes = n_modes
self.T = T
self.linear1 = nn.Linear(n_features, 4096)
self.linear2 = nn.Linear(512, n_modes * T * 2)
self.linear3 = nn.Linear(512, n_modes)
self.linear4 = nn.Linear(4096, 512)
self.softmax = nn.Softmax(dim=0)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_8 = self.linear3.weight
primals_9 = self.linear3.bias
primals_4 = self.linear4.weight
primals_5 = self.linear4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
SambaranRepo/VectorNet_Waymo
|
Decoder
| false
| 17,896
|
[
"MIT"
] | 4
|
454016a5020444e78943786c14e4e12a75ce052e
|
https://github.com/SambaranRepo/VectorNet_Waymo/tree/454016a5020444e78943786c14e4e12a75ce052e
|
resBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class resBlock(nn.Module):
def __init__(self, channelDepth, windowSize=3):
super(resBlock, self).__init__()
self.pad = nn.ReflectionPad2d(1)
self.IN_conv1 = nn.InstanceNorm2d(channelDepth)
self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
def forward(self, x):
res = x
x = F.relu(self.IN_conv1(self.conv1(self.pad(x))))
x = self.IN_conv1(self.conv2(self.pad(x)))
x = x + res
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channelDepth': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 + tmp26
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask)
tl.store(out_ptr3 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf4
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_2[grid(576)](buf2, buf3,
buf6, buf7, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_3[grid(16)](
buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, XBLOCK
=1, num_warps=2, num_stages=1)
del primals_1
del primals_5
return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9,
reinterpret_tensor(buf13, (16,), (1,), 0), reinterpret_tensor(buf10,
(1, 16, 1, 1), (16, 1, 1, 1), 0))
class resBlockNew(nn.Module):
def __init__(self, channelDepth, windowSize=3):
super(resBlockNew, self).__init__()
self.pad = nn.ReflectionPad2d(1)
self.IN_conv1 = nn.InstanceNorm2d(channelDepth)
self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, 0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
SeokjaeLIM/DSSN_release-Pytorch
|
resBlock
| false
| 17,897
|
[
"Apache-2.0"
] | 7
|
fef1dac120d7b83367b4c69f239b089ab5f004d7
|
https://github.com/SeokjaeLIM/DSSN_release-Pytorch/tree/fef1dac120d7b83367b4c69f239b089ab5f004d7
|
WeightedFeatureFusion
|
import torch
import torch.nn as nn
from torchvision.models.resnet import *
import torch.utils.data
class WeightedFeatureFusion(nn.Module):
def __init__(self, layers, weight=False):
super(WeightedFeatureFusion, self).__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, x, outputs):
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n)
x = x * w[0]
nx = x.shape[1]
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[
self.layers[i]]
na = a.shape[1]
if nx == na:
x = x + a
elif nx > na:
x[:, :na] = x[:, :na] + a
else:
x = x + a[:, :nx]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return [[], {'layers': [4, 4]}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torchvision.models.resnet import *
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp1
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class WeightedFeatureFusionNew(nn.Module):
def __init__(self, layers, weight=False):
super(WeightedFeatureFusionNew, self).__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
PanJason/ML_Proj
|
WeightedFeatureFusion
| false
| 17,898
|
[
"MIT"
] | 4
|
663be12e8eb6e30e3c902a4984ac0db33bfce605
|
https://github.com/PanJason/ML_Proj/tree/663be12e8eb6e30e3c902a4984ac0db33bfce605
|
ConformerFeedForward
|
import torch
from torch import nn
import torch.utils.data
import torch.optim
class Swish(nn.Module):
"""
Swish activation function introduced in 'https://arxiv.org/abs/1710.05941'
"""
def forward(self, x):
return x * torch.sigmoid(x)
class ConformerFeedForward(nn.Module):
"""
feed-forward module of Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForward, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.linear1(x)
x = self.activation(x)
x = self.dropout(x)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class Swish(nn.Module):
"""
Swish activation function introduced in 'https://arxiv.org/abs/1710.05941'
"""
def forward(self, x):
return x * torch.sigmoid(x)
class ConformerFeedForwardNew(nn.Module):
"""
feed-forward module of Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForwardNew, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ShantanuNair/NeMo
|
ConformerFeedForward
| false
| 17,899
|
[
"Apache-2.0"
] | 10
|
d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b
|
https://github.com/ShantanuNair/NeMo/tree/d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b
|
FusionAttention
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class FusionAttention(nn.Module):
def __init__(self, dim):
super(FusionAttention, self).__init__()
self.attention_matrix = nn.Linear(dim, dim)
self.project_weight = nn.Linear(dim, 1)
def forward(self, inputs):
query_project = self.attention_matrix(inputs)
query_project = F.leaky_relu(query_project)
project_value = self.project_weight(query_project)
attention_weight = torch.softmax(project_value, dim=1)
attention_vec = inputs * attention_weight
attention_vec = torch.sum(attention_vec, dim=1)
return attention_vec, attention_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
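A small usage sketch, assuming torch is imported and the FusionAttention class above is in scope; the batch/item/feature sizes below are hypothetical and only show the [batch, num_items, dim] input layout and the two returned tensors (the attention-pooled vector and the per-item weights):
attn = FusionAttention(dim=8)
inputs = torch.rand(2, 5, 8)      # batch of 2, 5 items, feature dim 8
vec, weights = attn(inputs)
print(vec.shape)                  # torch.Size([2, 8])
print(weights.shape)              # torch.Size([2, 5, 1]); softmax taken over the 5 items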
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
triton_poi_fused_mul_sum_3[grid(64)](primals_3, buf6, buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf7, buf6, primals_3, buf1, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), buf6, primals_4
class FusionAttentionNew(nn.Module):
def __init__(self, dim):
super(FusionAttentionNew, self).__init__()
self.attention_matrix = nn.Linear(dim, dim)
self.project_weight = nn.Linear(dim, 1)
def forward(self, input_0):
primals_1 = self.attention_matrix.weight
primals_2 = self.attention_matrix.bias
primals_4 = self.project_weight.weight
primals_5 = self.project_weight.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
Seondong/Customs-Fraud-Detection
|
FusionAttention
| false
| 17,900
|
[
"MIT"
] | 7
|
eb9e4641a78cb32d73787de86dd72ebb09df1452
|
https://github.com/Seondong/Customs-Fraud-Detection/tree/eb9e4641a78cb32d73787de86dd72ebb09df1452
|
MultiLayerPerceptron
|
import torch
import torch.utils.data
import torch.optim
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation)
)
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
return output_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_classes': 4}]
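A brief usage sketch, assuming torch is imported and the MultiLayerPerceptron class above is in scope; the sizes are hypothetical and simply show the classifier-on-top use described in the docstring, with log-probabilities returned when log_softmax=True:
mlp = MultiLayerPerceptron(hidden_size=16, num_classes=3, num_layers=2,
    activation='relu', log_softmax=True)
hidden = torch.rand(8, 16)        # e.g. pooled encoder states
log_probs = mlp(hidden)           # shape (8, 3); each row sums to 1 after exp()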
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class MultiLayerPerceptronNew(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation)
)
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, input_0):
primals_2 = self.layer0.weight
primals_3 = self.layer0.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ShantanuNair/NeMo
|
MultiLayerPerceptron
| false
| 17,901
|
[
"Apache-2.0"
] | 10
|
d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b
|
https://github.com/ShantanuNair/NeMo/tree/d01b7bbc3fdb1bbf14789f71b8f368cf0aa8f86b
|
_Residual_Block
|
import torch
import torch.nn as nn
class _Residual_Block(nn.Module):
def __init__(self):
super(_Residual_Block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, x):
identity_data = x
output = self.relu(self.in1(self.conv1(x)))
output = self.in2(self.conv2(output))
output = torch.add(output, identity_data)
return output
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]
tl.store(out_ptr1 + x0, tmp3, xmask)
tmp15 = tl.load(in_ptr2 + x0 % 64, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + 4096 * x0), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + x0, tmp26, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]
tl.store(out_ptr1 + x0, tmp3, xmask)
x2 = xindex % 64
tmp15 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp17 = tl.load(in_ptr3 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr3 + (r1 + 4096 * x0), tmp18, rmask & xmask)
tmp19 = 4096.0
tmp20 = tmp4 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr4 + x0, tmp23, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((256,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch
.float32)
buf6 = empty_strided_cuda((1, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 64, 64), (262144, 4096, 64,
1), 0)
del buf6
buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch
.float32)
get_raw_stream(0)
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0[grid(256)
](buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 4096,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_3
del primals_4
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = empty_strided_cuda((256,), (1,), torch.float32)
buf10 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf13 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_red_fused__native_batch_norm_legit_add_repeat_1[grid(256)](
primals_6, buf8, primals_7, primals_1, buf9, buf10, buf14,
buf13, 256, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
del primals_6
del primals_7
return (buf14, primals_1, primals_2, primals_5, buf0, buf1,
reinterpret_tensor(buf5, (256,), (1,), 0), buf7, buf8, buf9,
reinterpret_tensor(buf13, (256,), (1,), 0), reinterpret_tensor(
buf10, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf2,
(1, 256, 1, 1), (256, 1, 1, 1), 0))
class _Residual_BlockNew(nn.Module):
def __init__(self):
super(_Residual_BlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.in1.weight
primals_4 = self.in1.bias
primals_5 = self.conv2.weight
primals_6 = self.in2.weight
primals_7 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Shandilya21/Improved-Optimization-Tecniques-for-Super-Resoultion-in-Images
|
_Residual_Block
| false
| 17,902
|
[
"MIT"
] | 10
|
d903d99706f557d74e00d4395e7d316172a9f7ee
|
https://github.com/Shandilya21/Improved-Optimization-Tecniques-for-Super-Resoultion-in-Images/tree/d903d99706f557d74e00d4395e7d316172a9f7ee
|
DyIntraModalityUpdate
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class DyIntraModalityUpdate(nn.Module):
"""
Dynamic Intra-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(DyIntraModalityUpdate, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v4q_gate_lin = FCNet(v_size, output_size, drop=drop)
self.q4v_gate_lin = FCNet(q_size, output_size, drop=drop)
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size, output_size, drop=drop, activate
='relu')
self.q_output = FCNet(output_size, output_size, drop=drop, activate
='relu')
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, v, q):
"""
:param v: [batch_size, num_obj, feature_size]
:param q: [batch_size, max_len, feature_size]
:return:
"""
_batch_size, num_obj = v.shape[0], v.shape[1]
max_len = q.shape[1]
v_mean = v.sum(1) / num_obj
q_mean = q.sum(1) / max_len
v4q_gate = self.sigmoid(self.v4q_gate_lin(v_mean)).unsqueeze(1)
q4v_gate = self.sigmoid(self.q4v_gate_lin(q_mean)).unsqueeze(1)
v_tran = self.v_lin(v)
q_tran = self.q_lin(q)
v_key, v_query, v_val = torch.split(v_tran, v_tran.size(2) // 3, dim=2)
q_key, q_query, q_val = torch.split(q_tran, q_tran.size(2) // 3, dim=2)
gated_v_query = (1 + q4v_gate) * v_query
gated_v_key = (1 + q4v_gate) * v_key
gated_v_val = (1 + q4v_gate) * v_val
gated_q_query = (1 + v4q_gate) * q_query
gated_q_key = (1 + v4q_gate) * q_key
gated_q_val = (1 + v4q_gate) * q_val
v_key_set = torch.split(gated_v_key, gated_v_key.size(2) // self.
num_head, dim=2)
v_query_set = torch.split(gated_v_query, gated_v_query.size(2) //
self.num_head, dim=2)
v_val_set = torch.split(gated_v_val, gated_v_val.size(2) // self.
num_head, dim=2)
q_key_set = torch.split(gated_q_key, gated_q_key.size(2) // self.
num_head, dim=2)
q_query_set = torch.split(gated_q_query, gated_q_query.size(2) //
self.num_head, dim=2)
q_val_set = torch.split(gated_q_val, gated_q_val.size(2) // self.
num_head, dim=2)
for i in range(self.num_head):
v_key_slice, v_query_slice, v_val_slice = v_key_set[i
], v_query_set[i], v_val_set[i]
q_key_slice, q_query_slice, q_val_slice = q_key_set[i
], q_query_set[i], q_val_set[i]
v2v = v_query_slice @ v_key_slice.transpose(1, 2) / (self.
output_size // self.num_head) ** 0.5
q2q = q_query_slice @ q_key_slice.transpose(1, 2) / (self.
output_size // self.num_head) ** 0.5
dyIntranMAF_v2v = F.softmax(v2v, dim=2).unsqueeze(3)
dyIntranMAF_q2q = F.softmax(q2q, dim=2).unsqueeze(3)
v_update = (dyIntranMAF_v2v * v_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((v_update, (dyIntranMAF_v2v *
v_val_slice.unsqueeze(1)).sum(2)), dim=2)
q_update = (dyIntranMAF_q2q * q_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((q_update, (dyIntranMAF_q2q *
q_val_slice.unsqueeze(1)).sum(2)), dim=2)
updated_v = self.v_output(v + v_update)
updated_q = self.q_output(q + q_update)
return updated_v, updated_q
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'v_size': 4, 'q_size': 4, 'output_size': 4, 'num_head': 4}]
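A minimal shape-check sketch, assuming torch is imported and the DyIntraModalityUpdate class above is in scope; the sizes are hypothetical and only exercise the shapes documented in the forward() docstring (v: [batch_size, num_obj, feature_size], q: [batch_size, max_len, feature_size]):
update = DyIntraModalityUpdate(v_size=64, q_size=64, output_size=64, num_head=8)
v = torch.rand(2, 36, 64)         # 36 visual objects
q = torch.rand(2, 14, 64)         # 14 question tokens
new_v, new_q = update(v, q)
print(new_v.shape, new_q.shape)   # torch.Size([2, 36, 64]) torch.Size([2, 14, 64])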
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
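    # Fuses the linear bias add with ReLU and stores the (activation <= 0) mask
    # that the autograd backward pass needs for the threshold gradient.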
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 12 * x3), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + 12 * x3), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 12 * x3), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp3 * tmp6
tmp9 = tmp3 * tmp8
tl.store(out_ptr0 + x4, tmp5, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
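    # Numerically stable softmax, first half: subtract the per-row maximum and
    # exponentiate; the division by the row sum is folded into the downstream
    # cat / add_cat kernels.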
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + 16 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (4 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (8 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (12 + 16 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (1 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (5 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (9 + 16 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (13 + 16 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (2 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (6 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (10 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (14 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_cat_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex // 4
x2 = xindex // 16
x3 = xindex
tmp34 = tl.load(in_ptr3 + x3, xmask)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x4, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x4), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (3 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (7 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (11 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (15 + 16 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tmp35 = tmp34 + tmp33
tl.store(in_out_ptr0 + x3, tmp35, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12,), (1,))
assert_size_stride(primals_9, (12, 4), (4, 1))
assert_size_stride(primals_10, (12,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_sum_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
del primals_6
buf4 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 12), (1, 4), 0), out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf5)
del primals_9
buf6 = reinterpret_tensor(buf4, (4, 4, 12), (48, 12, 1), 0)
del buf4
buf61 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(192)](buf6,
primals_8, buf61, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf5, (4, 4, 12), (48, 12, 1), 0)
del buf5
buf60 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(192)](buf7,
primals_10, buf60, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_10
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(64)](buf3, buf6, buf8, buf9, buf20,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf8, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf9, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf10, buf11, out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(64)](buf2, buf7, buf13, buf14,
buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 16), 0)
del buf11
triton_poi_fused_bmm_3[grid(16)](buf13, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = reinterpret_tensor(buf10, (4, 1, 4), (4, 16, 1), 0)
del buf10
triton_poi_fused_bmm_3[grid(16)](buf14, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf15, buf16, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf12, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf17, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 16), 0)
del buf16
triton_poi_fused_bmm_5[grid(16)](buf8, buf22, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0)
del buf15
triton_poi_fused_bmm_5[grid(16)](buf9, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0)
del buf23
triton_poi_fused_bmm_5[grid(16)](buf13, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf26 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0)
del buf22
triton_poi_fused_bmm_5[grid(16)](buf14, buf26, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf25, buf26, out=buf27)
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf24, buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf27, buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf18, buf20, buf28, buf30, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf19, buf21, buf29, buf31, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf32 = reinterpret_tensor(buf26, (4, 4, 1), (4, 1, 16), 0)
del buf26
triton_poi_fused_bmm_7[grid(16)](buf8, buf32, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf25, (4, 1, 4), (4, 16, 1), 0)
del buf25
triton_poi_fused_bmm_7[grid(16)](buf9, buf33, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf34 = buf29
del buf29
extern_kernels.bmm(buf32, buf33, out=buf34)
buf35 = reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 16), 0)
del buf33
triton_poi_fused_bmm_7[grid(16)](buf13, buf35, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf36 = reinterpret_tensor(buf32, (4, 1, 4), (4, 16, 1), 0)
del buf32
triton_poi_fused_bmm_7[grid(16)](buf14, buf36, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf37 = buf19
del buf19
extern_kernels.bmm(buf35, buf36, out=buf37)
buf38 = buf28
del buf28
triton_poi_fused__softmax_4[grid(64)](buf34, buf38, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf39 = buf18
del buf18
triton_poi_fused__softmax_4[grid(64)](buf37, buf39, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_8[grid(48)](buf30, buf38, buf20, buf40, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf30
buf41 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_8[grid(48)](buf31, buf39, buf21, buf41, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf31
buf42 = reinterpret_tensor(buf36, (4, 4, 1), (4, 1, 16), 0)
del buf36
triton_poi_fused_bmm_9[grid(16)](buf8, buf42, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf43 = reinterpret_tensor(buf35, (4, 1, 4), (4, 16, 1), 0)
del buf35
triton_poi_fused_bmm_9[grid(16)](buf9, buf43, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf44 = buf39
del buf39
extern_kernels.bmm(buf42, buf43, out=buf44)
buf45 = reinterpret_tensor(buf43, (4, 4, 1), (4, 1, 16), 0)
del buf43
triton_poi_fused_bmm_9[grid(16)](buf13, buf45, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf46 = reinterpret_tensor(buf42, (4, 1, 4), (4, 16, 1), 0)
del buf42
triton_poi_fused_bmm_9[grid(16)](buf14, buf46, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf47 = buf38
del buf38
extern_kernels.bmm(buf45, buf46, out=buf47)
del buf45
del buf46
buf48 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf44, buf48, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf49 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf47, buf49, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf50 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf52 = buf50
del buf50
triton_poi_fused_add_cat_10[grid(64)](buf52, buf40, buf48, buf20,
primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf40
buf51 = buf48
del buf48
buf55 = buf51
del buf51
triton_poi_fused_add_cat_10[grid(64)](buf55, buf41, buf49, buf21,
primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf41
buf53 = reinterpret_tensor(buf49, (16, 4), (4, 1), 0)
del buf49
extern_kernels.mm(reinterpret_tensor(buf52, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf53)
buf54 = reinterpret_tensor(buf53, (4, 4, 4), (16, 4, 1), 0)
del buf53
buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_11[grid(64)](buf54,
primals_12, buf59, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf56)
buf57 = reinterpret_tensor(buf56, (4, 4, 4), (16, 4, 1), 0)
del buf56
buf58 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_11[grid(64)](buf57,
primals_14, buf58, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
return buf54, buf57, buf0, buf1, buf2, buf3, reinterpret_tensor(primals_1,
(16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 4
), reinterpret_tensor(buf6, (4, 4, 4), (48, 12, 1), 8
), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 0
), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 4
), reinterpret_tensor(buf7, (4, 4, 4), (48, 12, 1), 8
), buf12, buf17, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 0), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 0
), buf24, buf27, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 1), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 1
), buf34, buf37, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 2), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 2
), buf44, buf47, reinterpret_tensor(buf20, (4, 1, 4, 1), (16, 16, 4,
1), 3), reinterpret_tensor(buf21, (4, 1, 4, 1), (16, 16, 4, 1), 3
), reinterpret_tensor(buf52, (16, 4), (4, 1), 0), reinterpret_tensor(
buf55, (16, 4), (4, 1), 0
), buf58, primals_13, buf59, primals_11, reinterpret_tensor(buf13,
(4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf14, (4, 4, 1), (16,
4, 1), 3), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 3
), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf13, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf14, (4, 4, 1), (16, 4, 1), 0
), reinterpret_tensor(buf8, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf9, (4, 4, 1), (16, 4, 1), 0), buf60, buf61
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class DyIntraModalityUpdateNew(nn.Module):
"""
Dynamic Intra-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(DyIntraModalityUpdateNew, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v4q_gate_lin = FCNet(v_size, output_size, drop=drop)
self.q4v_gate_lin = FCNet(q_size, output_size, drop=drop)
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size, output_size, drop=drop, activate
='relu')
self.q_output = FCNet(output_size, output_size, drop=drop, activate
='relu')
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_3 = self.v4q_gate_lin.lin.weight
primals_4 = self.v4q_gate_lin.lin.bias
primals_5 = self.q4v_gate_lin.lin.weight
primals_6 = self.q4v_gate_lin.lin.bias
primals_7 = self.v_lin.lin.weight
primals_8 = self.v_lin.lin.bias
primals_9 = self.q_lin.lin.weight
primals_10 = self.q_lin.lin.bias
primals_11 = self.v_output.lin.weight
primals_12 = self.v_output.lin.bias
primals_13 = self.q_output.lin.weight
primals_14 = self.q_output.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
|
Ruiver/CTCNet
|
DyIntraModalityUpdate
| false
| 17,903
|
[
"Apache-2.0"
] | 6
|
539e55ec9fed06028379d35dfd5cd4074755ffd8
|
https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8
|
ResnetDecoder
|
import torch
import torch.nn as nn
class ResnetDecoder(nn.Module):
"""
    This class represents the tail of ResNet. It performs global average pooling and maps
    the pooled features to class scores with a fully connected layer.
"""
def __init__(self, in_features, n_classes):
super().__init__()
self.avg = nn.AdaptiveAvgPool1d((1,))
self.decoder = nn.Linear(in_features, n_classes)
def forward(self, x):
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.decoder(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'n_classes': 4}]
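# Editor's note: a minimal usage sketch (not part of the original repository),
# assuming the ResnetDecoder class defined above. A [batch, channels, length]
# feature map is pooled to [batch, in_features] and mapped to n_classes logits.
if __name__ == '__main__':
    _decoder = ResnetDecoder(in_features=4, n_classes=4)
    _logits = _decoder(torch.rand(4, 4, 4))
    print(_logits.shape)  # torch.Size([4, 4])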
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
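    # Averages the four elements along the last dimension, i.e. the
    # AdaptiveAvgPool1d(1) step of the decoder.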
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf1)
del primals_2
del primals_3
return buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
class ResnetDecoderNew(nn.Module):
"""
    This class represents the tail of ResNet. It performs global average pooling and maps
    the pooled features to class scores with a fully connected layer.
"""
def __init__(self, in_features, n_classes):
super().__init__()
self.avg = nn.AdaptiveAvgPool1d((1,))
self.decoder = nn.Linear(in_features, n_classes)
def forward(self, input_0):
primals_2 = self.decoder.weight
primals_3 = self.decoder.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
SeffyVon/ECG_MICResNet
|
ResnetDecoder
| false
| 17,904
|
[
"BSD-3-Clause"
] | 5
|
8c6a319b5822ddfb130738eb1d9cdc3c21b24209
|
https://github.com/SeffyVon/ECG_MICResNet/tree/8c6a319b5822ddfb130738eb1d9cdc3c21b24209
|
Net
|
import torch
import torch.nn as nn
import torch.nn.init as init
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
self.upscale_factor = int(upscale_factor)
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.conv2_1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_3 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(32, int(self.upscale_factor ** 2),
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(self.upscale_factor)
self.conv5 = nn.Conv2d(1, 1, kernel_size=1, padding=0)
self.weight_init()
def forward(self, x):
out = self.relu(self.conv1(x))
out = self.relu(self.conv2_1(out))
out = self.relu(self.conv2_2(out))
out = self.relu(self.conv2_3(out))
out = self.relu(self.conv3(out))
out = self.relu(self.conv4(out))
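        # PixelShuffle rearranges the (upscale_factor ** 2) output channels of conv4
        # into upscale_factor x upscale_factor spatial blocks, enlarging H and W.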
out = self.pixel_shuffle(out)
out = self.conv5(out)
return out
def weight_init(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv5.weight)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {'upscale_factor': 1.0}]
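# Editor's note: a minimal usage sketch (not part of the original repository),
# assuming the Net class defined above. The factor and input size below are
# hypothetical, chosen to show the PixelShuffle enlargement; get_inputs() above
# uses upscale_factor=1.0, which leaves the spatial size unchanged.
if __name__ == '__main__':
    _net = Net(upscale_factor=2)
    _sr = _net(torch.rand(1, 1, 32, 32))  # single-channel low-resolution input
    print(_sr.shape)  # torch.Size([1, 1, 64, 64])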
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, None)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (1, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(1048576)](buf5, primals_7,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(1048576)](buf7, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_1[grid(524288)](buf9, primals_11,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf11 = reinterpret_tensor(buf10, (4, 1, 64, 64), (4096, 1, 64, 1), 0)
del buf10
buf14 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(16384)](
buf11, primals_13, buf14, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 1,
64, 64), (4096, 0, 64, 1), 0), primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_3[grid(16384)](buf13, primals_15,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9,
reinterpret_tensor(buf11, (4, 1, 64, 64), (4096, 4096, 64, 1), 0),
buf14)
class NetNew(nn.Module):
def __init__(self, upscale_factor):
super(NetNew, self).__init__()
self.upscale_factor = int(upscale_factor)
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.conv2_1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_3 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(32, int(self.upscale_factor ** 2),
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(self.upscale_factor)
self.conv5 = nn.Conv2d(1, 1, kernel_size=1, padding=0)
self.weight_init()
def weight_init(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2_3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv5.weight)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2_1.weight
primals_5 = self.conv2_1.bias
primals_6 = self.conv2_2.weight
primals_7 = self.conv2_2.bias
primals_8 = self.conv2_3.weight
primals_9 = self.conv2_3.bias
primals_10 = self.conv3.weight
primals_11 = self.conv3.bias
primals_12 = self.conv4.weight
primals_13 = self.conv4.bias
primals_14 = self.conv5.weight
primals_15 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
|
PiSchool/esa-superresolution-forecasting
|
Net
| false
| 17,905
|
[
"MIT"
] | 4
|
3c01770dd64749d6b6c40e1068a96a3307c8c035
|
https://github.com/PiSchool/esa-superresolution-forecasting/tree/3c01770dd64749d6b6c40e1068a96a3307c8c035
|
OneSideInterModalityUpdate
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class OneSideInterModalityUpdate(nn.Module):
"""
    one-side Inter-Modality Attention Flow
    according to the paper, instead of running V->Q and Q->V in parallel, we first perform V->Q and then Q->V
"""
def __init__(self, src_size, tgt_size, output_size, num_head, drop=0.0):
super(OneSideInterModalityUpdate, self).__init__()
self.src_size = src_size
self.tgt_size = tgt_size
self.output_size = output_size
self.num_head = num_head
self.src_lin = FCNet(src_size, output_size * 2, drop=drop, activate
='relu')
self.tgt_lin = FCNet(tgt_size, output_size, drop=drop, activate='relu')
self.tgt_output = FCNet(output_size + tgt_size, output_size, drop=
drop, activate='relu')
def forward(self, src, tgt):
"""
        :param src: EEG feature [batch, regions, feature_size]
        :param tgt: eye feature [batch, regions, feature_size]
        :return: updated tgt feature [batch, regions, output_size]
"""
_batch_size, _num_src = src.shape[0], src.shape[1]
        tgt.shape[1]  # number of target regions (computed but unused)
src_tran = self.src_lin(src)
tgt_tran = self.tgt_lin(tgt)
src_key, src_val = torch.split(src_tran, src_tran.size(2) // 2, dim=2)
tgt_query = tgt_tran
src_key_set = torch.split(src_key, src_key.size(2) // self.num_head,
dim=2)
src_val_set = torch.split(src_val, src_val.size(2) // self.num_head,
dim=2)
tgt_query_set = torch.split(tgt_query, tgt_query.size(2) // self.
num_head, dim=2)
for i in range(self.num_head):
src_key_slice, tgt_query_slice, src_val_slice = src_key_set[i
], tgt_query_set[i], src_val_set[i]
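            # Cross-modality attention for this head: target queries attend over
            # source keys (scaled by sqrt(head_dim)) to gather source values.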
src2tgt = tgt_query_slice @ src_key_slice.transpose(1, 2) / (self
.output_size // self.num_head) ** 0.5
interMAF_src2tgt = F.softmax(src2tgt, dim=2).unsqueeze(3)
tgt_update = (interMAF_src2tgt * src_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((tgt_update, (interMAF_src2tgt *
src_val_slice.unsqueeze(1)).sum(2)), dim=2)
cat_tgt = torch.cat((tgt, tgt_update), dim=2)
tgt_updated = self.tgt_output(cat_tgt)
return tgt_updated
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'src_size': 4, 'tgt_size': 4, 'output_size': 4, 'num_head': 4}
]
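# Editor's note: a minimal usage sketch (not part of the original repository),
# assuming the OneSideInterModalityUpdate class defined above. Source features
# provide keys/values, target features provide queries; only the target side is
# updated and returned.
if __name__ == '__main__':
    _update = OneSideInterModalityUpdate(src_size=4, tgt_size=4, output_size=4,
        num_head=4)
    _src, _tgt = get_inputs()
    _tgt_new = _update(_src, _tgt)
    print(_tgt_new.shape)  # torch.Size([4, 4, 4])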
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 8 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (4 + 32 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (12 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (20 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (28 + 32 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (5 + 32 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (13 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (21 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (29 + 32 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (6 + 32 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (14 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (22 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (30 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 8 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (7 + 32 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (15 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (23 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (31 + 32 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + 8 * x3), tmp33, xmask)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 8), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf2,
primals_6, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf3 = reinterpret_tensor(buf0, (4, 4, 8), (32, 8, 1), 0)
del buf0
buf29 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(128)](buf3,
primals_4, buf29, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_2[grid(16)](buf2, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_3[grid(16)](buf3, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 16), 0)
del buf5
triton_poi_fused_bmm_5[grid(16)](buf2, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf4, (4, 1, 4), (4, 16, 1), 0)
del buf4
triton_poi_fused_bmm_6[grid(16)](buf3, buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf8, buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_7[grid(32)](buf7, buf3, buf11, buf12, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0)
del buf9
triton_poi_fused_bmm_8[grid(16)](buf2, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0)
del buf8
triton_poi_fused_bmm_9[grid(16)](buf3, buf14, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf15 = buf7
del buf7
extern_kernels.bmm(buf13, buf14, out=buf15)
buf16 = buf11
del buf11
triton_poi_fused__softmax_4[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_10[grid(48)](buf12, buf16, buf3, buf17, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf12
buf18 = reinterpret_tensor(buf14, (4, 4, 1), (4, 1, 16), 0)
del buf14
triton_poi_fused_bmm_11[grid(16)](buf2, buf18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf19 = reinterpret_tensor(buf13, (4, 1, 4), (4, 16, 1), 0)
del buf13
triton_poi_fused_bmm_12[grid(16)](buf3, buf19, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf20 = buf16
del buf16
extern_kernels.bmm(buf18, buf19, out=buf20)
del buf18
del buf19
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(64)](buf20, buf21, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf22 = reinterpret_tensor(buf24, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_13[grid(64)](buf17, buf21, buf3, buf22, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf17
buf23 = reinterpret_tensor(buf24, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_14[grid(64)](primals_2, buf23, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf25 = reinterpret_tensor(buf21, (16, 4), (4, 1), 0)
del buf21
extern_kernels.mm(reinterpret_tensor(buf24, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf26,
primals_8, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
return buf26, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 4
), buf10, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 5
), buf15, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 6
), buf20, reinterpret_tensor(buf3, (4, 1, 4, 1), (32, 32, 8, 1), 7
), reinterpret_tensor(buf24, (16, 8), (8, 1), 0
), buf27, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 3
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (4, 4, 1), (32, 8, 1), 0), buf28, buf29
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class OneSideInterModalityUpdateNew(nn.Module):
"""
one-side Inter-Modality Attention Flow
    according to the paper, instead of parallel V->Q & Q->V, we first do V->Q and then Q->V
"""
def __init__(self, src_size, tgt_size, output_size, num_head, drop=0.0):
super(OneSideInterModalityUpdateNew, self).__init__()
self.src_size = src_size
self.tgt_size = tgt_size
self.output_size = output_size
self.num_head = num_head
self.src_lin = FCNet(src_size, output_size * 2, drop=drop, activate
='relu')
self.tgt_lin = FCNet(tgt_size, output_size, drop=drop, activate='relu')
self.tgt_output = FCNet(output_size + tgt_size, output_size, drop=
drop, activate='relu')
def forward(self, input_0, input_1):
primals_3 = self.src_lin.lin.weight
primals_4 = self.src_lin.lin.bias
primals_5 = self.tgt_lin.lin.weight
primals_6 = self.tgt_lin.lin.bias
primals_7 = self.tgt_output.lin.weight
primals_8 = self.tgt_output.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
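A minimal usage sketch for the fused wrapper above. This is not part of the source repository: the argument values (4, 4, 4, 4) and the src/tgt roles are assumptions inferred from the buffer shapes in call(), and a CUDA device is required because call() launches Triton kernels.
import torch
# Hypothetical usage of OneSideInterModalityUpdateNew; sizes are assumptions inferred
# from the kernel buffer shapes above, and all tensors must live on the GPU.
if torch.cuda.is_available():
    model = OneSideInterModalityUpdateNew(src_size=4, tgt_size=4, output_size=4,
        num_head=4).cuda()
    src = torch.rand(4, 4, 4, device='cuda')  # source-modality features [batch, regions, src_size]
    tgt = torch.rand(4, 4, 4, device='cuda')  # target-modality features [batch, regions, tgt_size]
    updated_tgt = model(src, tgt)             # one-sided src -> tgt attention update
    print(updated_tgt.shape)                  # expected: torch.Size([4, 4, 4])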
|
Ruiver/CTCNet
|
OneSideInterModalityUpdate
| false
| 17,906
|
[
"Apache-2.0"
] | 6
|
539e55ec9fed06028379d35dfd5cd4074755ffd8
|
https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
SeffyVon/ECG_MICResNet
|
DiceLoss
| false
| 17,907
|
[
"BSD-3-Clause"
] | 5
|
8c6a319b5822ddfb130738eb1d9cdc3c21b24209
|
https://github.com/SeffyVon/ECG_MICResNet/tree/8c6a319b5822ddfb130738eb1d9cdc3c21b24209
|
deepmind
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class deepmind(nn.Module):
def __init__(self):
super(deepmind, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.constant_(self.conv1.bias.data, 0)
nn.init.constant_(self.conv2.bias.data, 0)
nn.init.constant_(self.conv3.bias.data, 0)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 32 * 7 * 7)
return x
def get_inputs():
return [torch.rand([4, 4, 144, 144])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 156800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1225 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2,
156800, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 14, 14), (6272, 196, 14, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(25088)](
buf5, primals_7, buf6, 25088, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_7
return reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0
), primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6
class deepmindNew(nn.Module):
def __init__(self):
super(deepmindNew, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
calculate_gain('relu'))
nn.init.constant_(self.conv1.bias.data, 0)
nn.init.constant_(self.conv2.bias.data, 0)
nn.init.constant_(self.conv3.bias.data, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Rowing0914/TF2_RL
|
deepmind
| false
| 17,908
|
[
"MIT"
] | 8
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
https://github.com/Rowing0914/TF2_RL/tree/c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
Bias
|
import torch
import torch.nn as nn
class Bias(nn.Module):
def __init__(self):
super(Bias, self).__init__()
self.bias = nn.Parameter(torch.zeros(1))
def forward(self, feat_img, feat_sound):
B, C, H, W = feat_sound.size()
feat_img = feat_img.view(B, 1, C)
z = torch.bmm(feat_img, feat_sound.view(B, C, H * W)).view(B, 1, H, W)
z = z + self.bias
return z
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
z = feat_img.view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img, feat_sound).view(B, HI, WI, HS, WS)
z = z + self.bias
return z
def get_inputs():
return [torch.rand([4, 1, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(primals_2, reinterpret_tensor(primals_1, (4, 4,
16), (64, 16, 1), 0), out=buf0)
del primals_1
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1,
class BiasNew(nn.Module):
def __init__(self):
super(BiasNew, self).__init__()
self.bias = nn.Parameter(torch.zeros(1))
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
z = feat_img.view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img, feat_sound).view(B, HI, WI, HS, WS)
z = z + self.bias
return z
def forward(self, input_0, input_1):
primals_3 = self.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
SheldonTsui/Minus-Plus-Network
|
Bias
| false
| 17,909
|
[
"Apache-2.0"
] | 5
|
7aa281b17f637a9f168aaf250039e560027a3817
|
https://github.com/SheldonTsui/Minus-Plus-Network/tree/7aa281b17f637a9f168aaf250039e560027a3817
|
projection_model
|
import torch
class projection_model(torch.nn.Module):
def __init__(self, neo_hidden, clip_hidden=512):
super(projection_model, self).__init__()
self.fc1 = torch.nn.Linear(neo_hidden, neo_hidden // 2)
self.act = torch.nn.GELU()
self.fc2 = torch.nn.Linear(neo_hidden // 2, clip_hidden)
def forward(self, input_tensor):
out = self.act(self.fc1(input_tensor))
return self.fc2(out)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'neo_hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 2), (2, 1))
assert_size_stride(primals_5, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_4, (2, 512), (1, 2), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4
class projection_modelNew(torch.nn.Module):
def __init__(self, neo_hidden, clip_hidden=512):
super(projection_modelNew, self).__init__()
self.fc1 = torch.nn.Linear(neo_hidden, neo_hidden // 2)
self.act = torch.nn.GELU()
self.fc2 = torch.nn.Linear(neo_hidden // 2, clip_hidden)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ShivanshuPurohit/GPT-Neo-visual-grounding
|
projection_model
| false
| 17,910
|
[
"Apache-2.0"
] | 4
|
9c938257a688ef5ae8bc1b87b61d943aa158e880
|
https://github.com/ShivanshuPurohit/GPT-Neo-visual-grounding/tree/9c938257a688ef5ae8bc1b87b61d943aa158e880
|
DSCLoss
|
import torch
import torch.nn as nn
class DSCLoss(nn.Module):
def __init__(self):
super(DSCLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
input_flat * target_flat
numerator = 2 * ((1 - input_flat) * input_flat * target_flat).sum(1
) + smooth
denominator = ((1 - input_flat) * input_flat + target_flat).sum(1
) + smooth
loss = 1 - numerator / denominator
return loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tmp2 * tmp0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tmp3 + tmp4
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp3
tmp7 = tmp4 / tmp6
tmp8 = tmp3 - tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_rsub_sum_0[grid(4)](arg1_1, arg0_1, buf0,
buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf0, buf1, buf2,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf2,
class DSCLossNew(nn.Module):
def __init__(self):
super(DSCLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
SeffyVon/ECG_MICResNet
|
DSCLoss
| false
| 17,911
|
[
"BSD-3-Clause"
] | 5
|
8c6a319b5822ddfb130738eb1d9cdc3c21b24209
|
https://github.com/SeffyVon/ECG_MICResNet/tree/8c6a319b5822ddfb130738eb1d9cdc3c21b24209
|
TwoMLPHead
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TwoMLPHead(nn.Module):
"""
Standard heads for FPN-based models
Arguments:
in_channels (int): number of input channels
        out_channels (int): size of the intermediate representation
"""
def __init__(self, in_channels, out_channels=1024):
super(TwoMLPHead, self).__init__()
self.fc6 = nn.Linear(in_channels, out_channels)
self.fc7 = nn.Linear(out_channels, out_channels)
def forward(self, x):
x = x.flatten(start_dim=1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
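A brief, illustrative usage sketch for TwoMLPHead (not from the source repo; the shapes follow get_inputs() and get_init_inputs() above).
import torch
# Illustrative only: in_channels=4 matches get_init_inputs(), and the (4, 4) input matches get_inputs().
head = TwoMLPHead(in_channels=4)   # out_channels defaults to 1024
features = torch.rand(4, 4)        # e.g. flattened ROI features [batch, in_channels]
out = head(features)
print(out.shape)                   # torch.Size([4, 1024])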
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (1024, 1024), (1024, 1))
assert_size_stride(primals_5, (1024,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024
), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(4096)](buf1, primals_3, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1024, 1024),
(1, 1024), 0), out=buf2)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(4096)](buf3,
primals_5, buf4, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, buf1, buf4, primals_4
class TwoMLPHeadNew(nn.Module):
"""
Standard heads for FPN-based models
Arguments:
in_channels (int): number of input channels
        out_channels (int): size of the intermediate representation
"""
def __init__(self, in_channels, out_channels=1024):
super(TwoMLPHeadNew, self).__init__()
self.fc6 = nn.Linear(in_channels, out_channels)
self.fc7 = nn.Linear(out_channels, out_channels)
def forward(self, input_0):
primals_2 = self.fc6.weight
primals_3 = self.fc6.bias
primals_4 = self.fc7.weight
primals_5 = self.fc7.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Sense-GVT/BigPretrain
|
TwoMLPHead
| false
| 17,912
|
[
"Apache-2.0"
] | 8
|
d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
|
https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
|
InterModalityUpdate
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class InterModalityUpdate(nn.Module):
"""
Inter-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(InterModalityUpdate, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size + v_size, output_size, drop=drop,
activate='relu')
self.q_output = FCNet(output_size + q_size, output_size, drop=drop,
activate='relu')
def forward(self, v, q):
"""
:param v: eeg feature [batch, regions, feature_size]
:param q: eye feature [batch, regions, feature_size]
        :return: updated eeg and eye features (updated_v, updated_q)
"""
_batch_size, _num_obj = v.shape[0], v.shape[1]
q.shape[1]
v_tran = self.v_lin(v)
q_tran = self.q_lin(q)
v_key, v_query, v_val = torch.split(v_tran, v_tran.size(2) // 3, dim=2)
q_key, q_query, q_val = torch.split(q_tran, q_tran.size(2) // 3, dim=2)
v_key_set = torch.split(v_key, v_key.size(2) // self.num_head, dim=2)
v_query_set = torch.split(v_query, v_query.size(2) // self.num_head,
dim=2)
v_val_set = torch.split(v_val, v_val.size(2) // self.num_head, dim=2)
q_key_set = torch.split(q_key, q_key.size(2) // self.num_head, dim=2)
q_query_set = torch.split(q_query, q_query.size(2) // self.num_head,
dim=2)
q_val_set = torch.split(q_val, q_val.size(2) // self.num_head, dim=2)
for i in range(self.num_head):
v_key_slice, v_query_slice, v_val_slice = v_key_set[i
], v_query_set[i], v_val_set[i]
q_key_slice, q_query_slice, q_val_slice = q_key_set[i
], q_query_set[i], q_val_set[i]
q2v = v_query_slice @ q_key_slice.transpose(1, 2) / (self.
output_size // self.num_head) ** 0.5
v2q = q_query_slice @ v_key_slice.transpose(1, 2) / (self.
output_size // self.num_head) ** 0.5
interMAF_q2v = F.softmax(q2v, dim=2).unsqueeze(3)
interMAF_v2q = F.softmax(v2q, dim=2).unsqueeze(3)
v_update = (interMAF_q2v * q_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((v_update, (interMAF_q2v *
q_val_slice.unsqueeze(1)).sum(2)), dim=2)
q_update = (interMAF_v2q * v_val_slice.unsqueeze(1)).sum(2
) if i == 0 else torch.cat((q_update, (interMAF_v2q *
v_val_slice.unsqueeze(1)).sum(2)), dim=2)
cat_v = torch.cat((v, v_update), dim=2)
cat_q = torch.cat((q, q_update), dim=2)
updated_v = self.v_output(cat_v)
updated_q = self.q_output(cat_q)
return updated_v, updated_q
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'v_size': 4, 'q_size': 4, 'output_size': 4, 'num_head': 4}]
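A short, illustrative usage sketch for InterModalityUpdate (not from the source repo; the init arguments and tensor shapes mirror get_init_inputs() and get_inputs() above, with v/q being the EEG/eye features described in the forward docstring).
import torch
# Illustrative only: arguments and shapes mirror get_init_inputs()/get_inputs() above.
module = InterModalityUpdate(v_size=4, q_size=4, output_size=4, num_head=4)
v = torch.rand(4, 4, 4)  # eeg feature [batch, regions, v_size]
q = torch.rand(4, 4, 4)  # eye feature [batch, regions, q_size]
updated_v, updated_q = module(v, q)
print(updated_v.shape, updated_q.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4])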
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_bmm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 12 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (5 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x2 = xindex // 8
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (3 + 4 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tmp5 / tmp11
tmp13 = tl.load(in_ptr1 + (8 + 48 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp6 / tmp11
tmp16 = tl.load(in_ptr1 + (20 + 48 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp8 / tmp11
tmp20 = tl.load(in_ptr1 + (32 + 48 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp10 / tmp11
tmp24 = tl.load(in_ptr1 + (44 + 48 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr2 + 4 * x3, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (1 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.load(in_ptr2 + (2 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.load(in_ptr2 + (3 + 4 * x3), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp32 / tmp38
tmp40 = tl.load(in_ptr1 + (9 + 48 * x2), tmp29 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp33 / tmp38
tmp43 = tl.load(in_ptr1 + (21 + 48 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp35 / tmp38
tmp47 = tl.load(in_ptr1 + (33 + 48 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp37 / tmp38
tmp51 = tl.load(in_ptr1 + (45 + 48 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp29, tmp53, tmp54)
tmp56 = tl.where(tmp4, tmp28, tmp55)
tl.store(out_ptr0 + x5, tmp56, xmask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 12
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (10 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (22 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (34 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (46 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + x5, tmp33, xmask)
@triton.jit
def triton_poi_fused_bmm_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (7 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 12 * x0), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x2 = xindex // 16
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + 4 * x3, tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x3), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tmp9 / tmp15
tmp17 = tl.load(in_ptr2 + (11 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 / tmp15
tmp20 = tl.load(in_ptr2 + (23 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp12 / tmp15
tmp24 = tl.load(in_ptr2 + (35 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp14 / tmp15
tmp28 = tl.load(in_ptr2 + (47 + 48 * x2), tmp6 & xmask, eviction_policy
='evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp6, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp5, tmp32)
tl.store(out_ptr0 + (x0 + 8 * x3), tmp33, xmask)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf0, (4, 4, 12), (48, 12, 1), 0)
del buf0
buf53 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(192)](buf2,
primals_4, buf53, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = reinterpret_tensor(buf1, (4, 4, 12), (48, 12, 1), 0)
del buf1
buf52 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(192)](buf3,
primals_6, buf52, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_bmm_1[grid(16)](buf2, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_bmm_2[grid(16)](buf3, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, buf5, out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 16), 0)
del buf5
triton_poi_fused_bmm_1[grid(16)](buf3, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf4, (4, 1, 4), (4, 16, 1), 0)
del buf4
triton_poi_fused_bmm_2[grid(16)](buf2, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf7, buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf6, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf9, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 4, 1), (4, 1, 16), 0)
del buf8
triton_poi_fused_bmm_4[grid(16)](buf2, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf7, (4, 1, 4), (4, 16, 1), 0)
del buf7
triton_poi_fused_bmm_5[grid(16)](buf3, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf12, buf13, out=buf14)
buf15 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 16), 0)
del buf13
triton_poi_fused_bmm_4[grid(16)](buf3, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = reinterpret_tensor(buf12, (4, 1, 4), (4, 16, 1), 0)
del buf12
triton_poi_fused_bmm_5[grid(16)](buf2, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf15, buf16, out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf14, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf17, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf10, buf3, buf18, buf20, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_6[grid(32)](buf11, buf2, buf19, buf21, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 16), 0)
del buf16
triton_poi_fused_bmm_7[grid(16)](buf2, buf22, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0)
del buf15
triton_poi_fused_bmm_8[grid(16)](buf3, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = buf19
del buf19
extern_kernels.bmm(buf22, buf23, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0)
del buf23
triton_poi_fused_bmm_7[grid(16)](buf3, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf26 = reinterpret_tensor(buf22, (4, 1, 4), (4, 16, 1), 0)
del buf22
triton_poi_fused_bmm_8[grid(16)](buf2, buf26, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf27 = buf11
del buf11
extern_kernels.bmm(buf25, buf26, out=buf27)
buf28 = buf18
del buf18
triton_poi_fused__softmax_3[grid(64)](buf24, buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = buf10
del buf10
triton_poi_fused__softmax_3[grid(64)](buf27, buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_9[grid(48)](buf20, buf28, buf3, buf30, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf20
buf31 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
triton_poi_fused_cat_9[grid(48)](buf21, buf29, buf2, buf31, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf21
buf32 = reinterpret_tensor(buf26, (4, 4, 1), (4, 1, 16), 0)
del buf26
triton_poi_fused_bmm_10[grid(16)](buf2, buf32, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf25, (4, 1, 4), (4, 16, 1), 0)
del buf25
triton_poi_fused_bmm_11[grid(16)](buf3, buf33, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf34 = buf29
del buf29
extern_kernels.bmm(buf32, buf33, out=buf34)
buf35 = reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 16), 0)
del buf33
triton_poi_fused_bmm_10[grid(16)](buf3, buf35, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf36 = reinterpret_tensor(buf32, (4, 1, 4), (4, 16, 1), 0)
del buf32
triton_poi_fused_bmm_11[grid(16)](buf2, buf36, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf37 = buf28
del buf28
extern_kernels.bmm(buf35, buf36, out=buf37)
del buf35
del buf36
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf34, buf38, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf37, buf39, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf43 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf40 = reinterpret_tensor(buf43, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_12[grid(64)](buf30, buf38, buf3, buf40, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf30
buf45 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf41 = reinterpret_tensor(buf45, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_cat_12[grid(64)](buf31, buf39, buf2, buf41, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf31
buf42 = reinterpret_tensor(buf43, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_13[grid(64)](primals_1, buf42, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf44 = reinterpret_tensor(buf45, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_cat_13[grid(64)](primals_2, buf44, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf46 = reinterpret_tensor(buf39, (16, 4), (4, 1), 0)
del buf39
extern_kernels.mm(reinterpret_tensor(buf43, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf46)
buf47 = reinterpret_tensor(buf46, (4, 4, 4), (16, 4, 1), 0)
del buf46
buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(64)](buf47,
primals_8, buf51, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf48 = reinterpret_tensor(buf38, (16, 4), (4, 1), 0)
del buf38
extern_kernels.mm(reinterpret_tensor(buf45, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf48)
buf49 = reinterpret_tensor(buf48, (4, 4, 4), (16, 4, 1), 0)
del buf48
buf50 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(64)](buf49,
primals_10, buf50, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return buf47, buf49, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf6, buf9, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 8), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 8
), buf14, buf17, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 9), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 9
), buf24, buf27, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 10), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 10
), buf34, buf37, reinterpret_tensor(buf3, (4, 1, 4, 1), (48, 48, 12,
1), 11), reinterpret_tensor(buf2, (4, 1, 4, 1), (48, 48, 12, 1), 11
), reinterpret_tensor(buf43, (16, 8), (8, 1), 0), reinterpret_tensor(
buf45, (16, 8), (8, 1), 0
), buf50, primals_9, buf51, primals_7, reinterpret_tensor(buf3, (4,
1, 4), (48, 1, 12), 7), reinterpret_tensor(buf2, (4, 4, 1), (48, 12,
1), 3), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 7
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 3
), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 6
), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 6
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 2
), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 5
), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 5
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 1
), reinterpret_tensor(buf3, (4, 1, 4), (48, 1, 12), 4
), reinterpret_tensor(buf2, (4, 4, 1), (48, 12, 1), 0
), reinterpret_tensor(buf2, (4, 1, 4), (48, 1, 12), 4
), reinterpret_tensor(buf3, (4, 4, 1), (48, 12, 1), 0), buf52, buf53
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = nn.Linear(in_size, out_size)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class InterModalityUpdateNew(nn.Module):
"""
Inter-Modality Attention Flow
"""
def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
super(InterModalityUpdateNew, self).__init__()
self.v_size = v_size
self.q_size = q_size
self.output_size = output_size
self.num_head = num_head
self.v_lin = FCNet(v_size, output_size * 3, drop=drop, activate='relu')
self.q_lin = FCNet(q_size, output_size * 3, drop=drop, activate='relu')
self.v_output = FCNet(output_size + v_size, output_size, drop=drop,
activate='relu')
self.q_output = FCNet(output_size + q_size, output_size, drop=drop,
activate='relu')
def forward(self, input_0, input_1):
primals_3 = self.v_lin.lin.weight
primals_4 = self.v_lin.lin.bias
primals_5 = self.q_lin.lin.weight
primals_6 = self.q_lin.lin.bias
primals_7 = self.v_output.lin.weight
primals_8 = self.v_output.lin.bias
primals_9 = self.q_output.lin.weight
primals_10 = self.q_output.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
|
Ruiver/CTCNet
|
InterModalityUpdate
| false
| 17,913
|
[
"Apache-2.0"
] | 6
|
539e55ec9fed06028379d35dfd5cd4074755ffd8
|
https://github.com/Ruiver/CTCNet/tree/539e55ec9fed06028379d35dfd5cd4074755ffd8
|
C3D_mini
|
import torch
import torch.nn as nn
class C3D_mini(nn.Module):
""" The C3D_mini network """
def __init__(self, num_classes=2, pretrained=False):
super(C3D_mini, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 512)
self.fc7 = nn.Linear(512, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = self.pool5(x)
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 2048 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_15, (512,), (1,))
assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 8192), (8192, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (2, 512), (512, 1))
assert_size_stride(primals_21, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144,
4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2,
67108864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2,
2], [1, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536,
1024, 32, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_1[grid(33554432)](buf6, primals_5,
33554432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [1, 2,
2], [1, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1
), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 64, 16, 16), (4194304, 16384,
256, 16, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_2[grid(16777216)](buf11,
primals_7, 16777216, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 64, 16, 16), (4194304, 16384,
256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_2[grid(16777216)](buf13,
primals_9, 16777216, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2,
2], [2, 2, 2])
buf15 = buf14[0]
buf16 = buf14[1]
del buf14
buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 512, 32, 8, 8), (1048576, 2048, 64, 8, 1)
)
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_3[grid(4194304)](buf18,
primals_11, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 512, 32, 8, 8), (1048576, 2048, 64, 8, 1)
)
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_3[grid(4194304)](buf20,
primals_13, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2,
2], [2, 2, 2])
buf22 = buf21[0]
buf23 = buf21[1]
del buf21
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 16, 4, 4), (131072, 256, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_4[grid(524288)](buf25, primals_15,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 512, 16, 4, 4), (131072, 256, 16, 4, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_4[grid(524288)](buf27, primals_17,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2,
2], [2, 2, 2], [0, 1, 1])
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = empty_strided_cuda((18, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf29, (18, 8192), (8192, 1),
0), reinterpret_tensor(primals_18, (8192, 512), (1, 8192), 0),
out=buf31)
buf32 = buf31
del buf31
triton_poi_fused_relu_5[grid(9216)](buf32, primals_19, 9216, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_19
buf33 = empty_strided_cuda((18, 2), (2, 1), torch.float32)
extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (512, 2), (
1, 512), 0), out=buf33)
buf34 = buf33
del buf33
buf35 = empty_strided_cuda((18, 2), (2, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(36)](buf34,
primals_21, buf35, 36, XBLOCK=64, num_warps=1, num_stages=1)
del primals_21
return (buf34, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4,
buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22,
buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (18, 8192), (
8192, 1), 0), buf32, buf35, primals_20, primals_18)
class C3D_miniNew(nn.Module):
""" The C3D_mini network """
def __init__(self, num_classes=2, pretrained=False):
super(C3D_miniNew, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 512)
self.fc7 = nn.Linear(512, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3a.weight
primals_7 = self.conv3a.bias
primals_8 = self.conv3b.weight
primals_9 = self.conv3b.bias
primals_10 = self.conv4a.weight
primals_11 = self.conv4a.bias
primals_12 = self.conv4b.weight
primals_13 = self.conv4b.bias
primals_14 = self.conv5a.weight
primals_15 = self.conv5a.bias
primals_16 = self.conv5b.weight
primals_17 = self.conv5b.bias
primals_18 = self.fc6.weight
primals_19 = self.fc6.bias
primals_20 = self.fc7.weight
primals_21 = self.fc7.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
|
Ontheway361/C3D
|
C3D_mini
| false
| 17,914
|
[
"MIT"
] | 7
|
7aa5364d8c0c6bddc17b1b8939b198fe66e282ca
|
https://github.com/Ontheway361/C3D/tree/7aa5364d8c0c6bddc17b1b8939b198fe66e282ca
|
InnerProd
|
import torch
import torch.nn as nn
class InnerProd(nn.Module):
def __init__(self, fc_dim):
super(InnerProd, self).__init__()
self.scale = nn.Parameter(torch.ones(fc_dim))
self.bias = nn.Parameter(torch.zeros(1))
def forward(self, feat_img, feat_sound):
sound_size = feat_sound.size()
B, C = sound_size[0], sound_size[1]
feat_img = feat_img.view(B, 1, C)
z = torch.bmm(feat_img * self.scale, feat_sound.view(B, C, -1)).view(B,
1, *sound_size[2:])
z = z + self.bias
return z
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
feat_img = feat_img.view(B, C)
z = (feat_img * self.scale).view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img * self.scale, feat_sound).view(B, HI, WI,
HS, WS)
z = z + self.bias
return z
def get_inputs():
return [torch.rand([4, 1, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fc_dim': 4}]
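# A quick equivalence check (a sketch, not part of the repo): the bmm in
# InnerProd.forward is just a scaled per-pixel dot product between the image
# feature vector and every spatial position of the sound feature map, which is
# what the fused kernels below compute as mul + bmm + add.
_B, _C, _H, _W = 4, 4, 4, 4
_feat_img = torch.rand(_B, 1, _C)
_feat_sound = torch.rand(_B, _C, _H, _W)
_scale = torch.ones(_C)
_bias = torch.zeros(1)
_z_bmm = torch.bmm(_feat_img * _scale, _feat_sound.view(_B, _C, -1)).view(_B, 1, _H, _W) + _bias
_z_dot = torch.einsum('bc,bchw->bhw', _feat_img.view(_B, _C) * _scale, _feat_sound).unsqueeze(1) + _bias
assert torch.allclose(_z_bmm, _z_dot, atol=1e-06)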
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, primals_3, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(primals_1, (4, 4, 16),
(64, 16, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
return buf2, primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (64,
1, 16), 0)
class InnerProdNew(nn.Module):
def __init__(self, fc_dim):
super(InnerProdNew, self).__init__()
self.scale = nn.Parameter(torch.ones(fc_dim))
self.bias = nn.Parameter(torch.zeros(1))
def forward_nosum(self, feat_img, feat_sound):
B, C, _H, _W = feat_sound.size()
feat_img = feat_img.view(B, C)
z = (feat_img * self.scale).view(B, C, 1, 1) * feat_sound
z = z + self.bias
return z
def forward_pixelwise(self, feats_img, feat_sound):
B, C, HI, WI = feats_img.size()
B, C, HS, WS = feat_sound.size()
feats_img = feats_img.view(B, C, HI * WI)
feats_img = feats_img.transpose(1, 2)
feat_sound = feat_sound.view(B, C, HS * WS)
z = torch.bmm(feats_img * self.scale, feat_sound).view(B, HI, WI,
HS, WS)
z = z + self.bias
return z
def forward(self, input_0, input_1):
primals_3 = self.scale
primals_4 = self.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
SheldonTsui/Minus-Plus-Network
|
InnerProd
| false
| 17,915
|
[
"Apache-2.0"
] | 5
|
7aa281b17f637a9f168aaf250039e560027a3817
|
https://github.com/SheldonTsui/Minus-Plus-Network/tree/7aa281b17f637a9f168aaf250039e560027a3817
|
Actor
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Actor(nn.Module):
def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
init_w=0.003):
super(Actor, self).__init__()
self.fc1 = nn.Linear(nb_states, hidden1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc3 = nn.Linear(hidden2, nb_actions)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
out = self.sigmoid(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nb_states': 4, 'nb_actions': 4}]
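# A minimal usage sketch (assumed, not from the repo): the Actor maps a batch of
# states to actions squashed into [0, 1] by the final sigmoid, as in DDPG-style
# continuous control. Note that init_w is currently accepted but unused.
_actor = Actor(nb_states=4, nb_actions=4)
_actions = _actor(torch.rand(8, 4))
assert _actions.shape == (8, 4)
assert bool(((_actions >= 0) & (_actions <= 1)).all())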
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (4, 300), (300, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf8, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf7, 19200, XBLOCK=128, num_warps=4, num_stages=1
)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1,
300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_sigmoid_3[grid(256)](buf6, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, buf6, primals_6, buf7, primals_4, buf8
class ActorNew(nn.Module):
def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
init_w=0.003):
super(ActorNew, self).__init__()
self.fc1 = nn.Linear(nb_states, hidden1)
self.fc2 = nn.Linear(hidden1, hidden2)
self.fc3 = nn.Linear(hidden2, nb_actions)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Sharpiless/HAQ-for-Mobilenetv3-Quantization
|
Actor
| false
| 17,916
|
[
"MIT"
] | 5
|
76b7d98471adb666ad140abd2518bce6f0de3cfa
|
https://github.com/Sharpiless/HAQ-for-Mobilenetv3-Quantization/tree/76b7d98471adb666ad140abd2518bce6f0de3cfa
|
FeedForward
|
import math
import torch
import torch.nn as nn
def activation(act_type='swish'):
if act_type == 'swish':
act = swish()
return act
else:
act = nn.ReLU(inplace=True)
return act
class swish(nn.Module):
def __init__(self):
super(swish, self).__init__()
def forward(self, x):
x = x * torch.sigmoid(x)
return x
class GELU(nn.Module):
"""
Gaussian Error Linear Units, based on
`"Gaussian Error Linear Units (GELUs)" <https://arxiv.org/abs/1606.08415>`
"""
def __init__(self, approximate=True):
super(GELU, self).__init__()
self.approximate = approximate
def forward(self, x):
if self.approximate:
cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
return x * cdf
else:
return x * (torch.erf(x / math.sqrt(2)) + 1) / 2
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.1, activation=GELU):
super(FeedForward, self).__init__()
self.mlp1 = nn.Linear(dim, hidden_dim)
self.act = activation()
self.mlp2 = nn.Linear(hidden_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.mlp1(x)
x = self.act(x)
x = self.dropout(x)
x = self.mlp2(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'hidden_dim': 4}]
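# A small check (a sketch, not from the repo): the tanh-based branch of GELU
# (approximate=True) tracks the exact erf-based branch closely; the fused Triton
# kernel below implements exactly the approximate branch (its constant
# 0.7978845608028654 is sqrt(2 / pi)).
_x = torch.linspace(-4.0, 4.0, steps=101)
_approx = GELU(approximate=True)(_x)
_exact = GELU(approximate=False)(_x)
assert torch.max(torch.abs(_approx - _exact)).item() < 0.01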
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp5 * tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
def activation(act_type='swish'):
if act_type == 'swish':
act = swish()
return act
else:
act = nn.ReLU(inplace=True)
return act
class swish(nn.Module):
def __init__(self):
super(swish, self).__init__()
def forward(self, x):
x = x * torch.sigmoid(x)
return x
class GELU(nn.Module):
"""
Gaussian Error Linear Units, based on
`"Gaussian Error Linear Units (GELUs)" <https://arxiv.org/abs/1606.08415>`
"""
def __init__(self, approximate=True):
super(GELU, self).__init__()
self.approximate = approximate
def forward(self, x):
if self.approximate:
cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
return x * cdf
else:
return x * (torch.erf(x / math.sqrt(2)) + 1) / 2
class FeedForwardNew(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.1, activation=GELU):
super(FeedForwardNew, self).__init__()
self.mlp1 = nn.Linear(dim, hidden_dim)
self.act = activation()
self.mlp2 = nn.Linear(hidden_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.mlp1.weight
primals_2 = self.mlp1.bias
primals_4 = self.mlp2.weight
primals_5 = self.mlp2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Sense-GVT/BigPretrain
|
FeedForward
| false
| 17,917
|
[
"Apache-2.0"
] | 8
|
d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
|
https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
|
SIMPA
|
import torch
from typing import Optional
from typing import Tuple
import torch.nn as nn
from torch.nn.parameter import Parameter
from typing import Union
class SIMPA(nn.Module):
"""The signed mixed-path aggregation model.
Args:
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
"""
def __init__(self, hop: 'int', directed: 'bool'=False):
super(SIMPA, self).__init__()
self._hop_p = hop + 1
self._hop_n = int((1 + hop) * hop / 2)
self._undirected = not directed
if self._undirected:
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_undirected()
else:
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', x_p:
'torch.FloatTensor', x_n: 'torch.FloatTensor', x_pt:
'Optional[torch.FloatTensor]'=None, x_nt:
'Optional[torch.FloatTensor]'=None, A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
        =None) ->torch.FloatTensor:
"""
Making a forward pass of SIMPA.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
        * **x_p** (PyTorch FloatTensor) - Source positive hidden representations.
        * **x_n** (PyTorch FloatTensor) - Source negative hidden representations.
* **x_pt** (PyTorch FloatTensor, optional) - Target positive hidden representations. Default: None.
* **x_nt** (PyTorch FloatTensor, optional) - Target negative hidden representations. Default: None.
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **feat** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*input_dim) for undirected graphs
and (num_nodes, 4*input_dim) for directed graphs.
"""
if self._undirected:
feat_p = self._w_p[0] * x_p
feat_n = torch.zeros_like(feat_p)
curr_p = x_p.clone()
curr_n_aux = x_n.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_p = torch.matmul(A_p, curr_p)
curr_n_aux = torch.matmul(A_p, curr_n_aux)
feat_p += self._w_p[h] * curr_p
if h != self._hop_p - 1:
curr_n = torch.matmul(A_n, curr_n_aux)
feat_n += self._w_n[j] * curr_n
j += 1
for _ in range(self._hop_p - 2 - h):
curr_n = torch.matmul(A_p, curr_n)
feat_n += self._w_n[j] * curr_n
j += 1
feat = torch.cat([feat_p, feat_n], dim=1)
else:
A_sp = A_p
A_sn = A_n
A_tp = A_pt
A_tn = A_nt
x_sp = x_p
x_sn = x_n
feat_sp = self._w_sp[0] * x_sp
feat_sn = torch.zeros_like(feat_sp)
feat_tp = self._w_tp[0] * x_pt
feat_tn = torch.zeros_like(feat_tp)
curr_sp = x_sp.clone()
curr_sn_aux = x_sn.clone()
curr_tp = x_pt.clone()
curr_tn_aux = x_nt.clone()
j = 0
for h in range(0, self._hop_p):
if h > 0:
curr_sp = torch.matmul(A_sp, curr_sp)
curr_sn_aux = torch.matmul(A_sp, curr_sn_aux)
curr_tp = torch.matmul(A_tp, curr_tp)
curr_tn_aux = torch.matmul(A_tp, curr_tn_aux)
feat_sp += self._w_sp[h] * curr_sp
feat_tp += self._w_tp[h] * curr_tp
if h != self._hop_p - 1:
curr_sn = torch.matmul(A_sn, curr_sn_aux)
curr_tn = torch.matmul(A_tn, curr_tn_aux)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
for _ in range(self._hop_p - 2 - h):
curr_sn = torch.matmul(A_sp, curr_sn)
curr_tn = torch.matmul(A_tp, curr_tn)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
feat = torch.cat([feat_sp, feat_sn, feat_tp, feat_tn], dim=1)
return feat
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hop': 4}]
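# A counting sketch (not from the repo) of why SIMPA sizes its weight vectors
# the way it does: feat_p accumulates one term per power A_p^h x_p for
# h = 0..hop, giving _hop_p = hop + 1 positive weights, while feat_n accumulates
# one term per walk A_p^a A_n A_p^b x_n with a + b <= hop - 1, giving
# _hop_n = hop * (hop + 1) / 2 negative weights -- the same count the j index
# reaches in forward().
def _count_negative_terms(hop):
    hop_p = hop + 1
    j = 0
    for h in range(hop_p):
        if h != hop_p - 1:
            j += 1              # the torch.matmul(A_n, curr_n_aux) term
            j += hop_p - 2 - h  # the inner for-loop terms
    return j

for _hop in range(1, 8):
    assert _count_negative_terms(_hop) == _hop * (_hop + 1) // 2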
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 64
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr0 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp9 = tl.load(in_ptr0 + 2)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + x0, xmask)
tmp14 = tl.load(in_ptr0 + 3)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + x0, xmask)
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + x0, xmask)
tmp24 = tl.load(in_ptr0 + 5)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp26 = tl.load(in_ptr6 + x0, xmask)
tmp29 = tl.load(in_ptr0 + 6)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp31 = tl.load(in_ptr7 + x0, xmask)
tmp34 = tl.load(in_ptr0 + 7)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp36 = tl.load(in_ptr8 + x0, xmask)
tmp39 = tl.load(in_ptr0 + 8)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp41 = tl.load(in_ptr9 + x0, xmask)
tmp44 = tl.load(in_ptr0 + 9)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
tmp46 = tl.load(in_ptr10 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp27 = tmp25 * tmp26
tmp28 = tmp23 + tmp27
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp33 + tmp37
tmp42 = tmp40 * tmp41
tmp43 = tmp38 + tmp42
tmp47 = tmp45 * tmp46
tmp48 = tmp43 + tmp47
tl.store(out_ptr0 + (x1 + 128 * x2), tmp48, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr0 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr0 + 2)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp11 = tl.load(in_ptr3 + x2, xmask)
tmp14 = tl.load(in_ptr0 + 3)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp16 = tl.load(in_ptr4 + x2, xmask)
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp21 = tl.load(in_ptr5 + x2, xmask)
tmp3 = tmp1 * tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tl.store(out_ptr0 + (x0 + 128 * x1), tmp23, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (5, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (10, 1), (1, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf0, out=buf1)
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf1, out=buf2)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf2, out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0
), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0
), out=buf5)
del primals_3
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), buf5, out=buf6)
buf11 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf5, out=buf11)
buf12 = buf5
del buf5
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), buf11, out=buf12)
buf13 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf12, out=buf13)
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf11, out=buf16)
buf17 = buf11
del buf11
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), buf16, out=buf17)
del primals_4
buf8 = buf16
del buf16
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf6, out=buf8)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf8, out=buf9)
buf21 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
buf20 = reinterpret_tensor(buf21, (4, 4, 4, 4), (128, 16, 4, 1), 64)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_5, buf0, buf1, buf2,
buf3, buf6, buf8, buf9, buf12, buf13, buf17, buf20, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_5
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf4, out=buf10)
buf15 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf10, out=buf15)
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4,
1), 0), buf15, out=buf18)
del primals_6
buf19 = reinterpret_tensor(buf21, (4, 4, 4, 4), (128, 16, 4, 1), 0)
triton_poi_fused_add_mul_1[grid(256)](primals_1, primals_2, buf4,
buf10, buf15, buf18, buf19, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_1
return (buf21, primals_2, buf0, buf1, buf2, buf3, buf4, buf6, buf8,
buf9, buf10, buf12, buf13, buf15, buf17, buf18)
class SIMPANew(nn.Module):
"""The signed mixed-path aggregation model.
Args:
hop (int): Number of hops to consider.
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
"""
def __init__(self, hop: 'int', directed: 'bool'=False):
super(SIMPANew, self).__init__()
self._hop_p = hop + 1
self._hop_n = int((1 + hop) * hop / 2)
self._undirected = not directed
if self._undirected:
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_undirected()
else:
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self._w_p
primals_5 = self._w_n
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
primals_6 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
SherylHYX/SSSNET_Signed_Clustering
|
SIMPA
| false
| 17,918
|
[
"MIT"
] | 5
|
85736c18e86b396d64177d22b8c7f9859dfd794c
|
https://github.com/SherylHYX/SSSNET_Signed_Clustering/tree/85736c18e86b396d64177d22b8c7f9859dfd794c
|
SparseConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
class Sparse(autograd.Function):
""""
Prune the unimprotant weight for the forwards phase,
but pass the gradient to dense weight using SR-STE in the backwards phase
"""
@staticmethod
def forward(ctx, weight, N, M, decay=0.0002):
ctx.save_for_backward(weight)
output = weight.clone()
length = weight.numel()
group = int(length / M)
weight_temp = weight.detach().abs().reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M - N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.shape)
ctx.mask = w_b
ctx.decay = decay
return output * w_b
@staticmethod
def backward(ctx, grad_output):
weight, = ctx.saved_tensors
return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class SparseConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N
=2, M=4, **kwargs):
self.N = N
self.M = M
super(SparseConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias,
padding_mode, **kwargs)
def get_sparse_weights(self):
return Sparse.apply(self.weight, self.N, self.M)
def forward(self, x):
w = self.get_sparse_weights()
x = F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
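# A quick check (a sketch, not from the repo) of the N:M pattern Sparse.apply
# enforces: within every group of M consecutive weights, the M - N smallest-
# magnitude entries are zeroed, so exactly N survive (2:4 sparsity with the
# defaults used by SparseConv2d).
_w = torch.randn(8, 4)                      # 8 groups of M = 4 weights each
_w_sparse = Sparse.apply(_w, 2, 4)
_kept_per_group = (_w_sparse.reshape(-1, 4) != 0).sum(dim=1)
assert bool((_kept_per_group == 2).all())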
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch import autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = r1
tmp3 = tmp2.to(tl.int16)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
_tmp6, tmp7 = triton_helpers.sort_with_index(tmp4, tmp5, None, 1,
stable=False, descending=False)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp7, xmask)
@triton.jit
def triton_poi_fused_ones_scatter_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_ones_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tmp0.to(tl.int64)
tl.device_assert((0 <= tmp1) & (tmp1 < 4) | ~xmask,
'index out of bounds: 0 <= tmp1 < 4')
tmp3 = 0.0
tl.store(out_ptr0 + (tmp1 + 4 * x1), tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_rsub_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = 0.0002
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.int16)
get_raw_stream(0)
triton_per_fused_sort_0[grid(64)](primals_1, buf1, 64, 4, XBLOCK=32,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_ones_scatter_1[grid(256)](buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
triton_poi_fused_ones_scatter_2[grid(128)](buf1, buf2, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_rsub_3[grid(256)](primals_1, buf2, buf4, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del primals_1
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_4[grid(16)](buf6, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, primals_3, buf4, buf7
class Sparse(autograd.Function):
""""
Prune the unimprotant weight for the forwards phase,
but pass the gradient to dense weight using SR-STE in the backwards phase
"""
@staticmethod
def forward(ctx, weight, N, M, decay=0.0002):
ctx.save_for_backward(weight)
output = weight.clone()
length = weight.numel()
group = int(length / M)
weight_temp = weight.detach().abs().reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M - N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.shape)
ctx.mask = w_b
ctx.decay = decay
return output * w_b
@staticmethod
def backward(ctx, grad_output):
weight, = ctx.saved_tensors
return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class SparseConv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N
=2, M=4, **kwargs):
self.N = N
self.M = M
super(SparseConv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias,
padding_mode, **kwargs)
def get_sparse_weights(self):
return Sparse.apply(self.weight, self.N, self.M)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sense-GVT/BigPretrain
|
SparseConv2d
| false
| 17,920
|
[
"Apache-2.0"
] | 8
|
d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
|
https://github.com/Sense-GVT/BigPretrain/tree/d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
|
GCNConv_diag
|
import torch
from sklearn.metrics.pairwise import *
from torch.optim.lr_scheduler import *
class GCNConv_diag(torch.nn.Module):
"""
    A GCN convolution layer using a diagonal weight matrix (per-feature scaling)
"""
def __init__(self, input_size, device):
super(GCNConv_diag, self).__init__()
self.W = torch.nn.Parameter(torch.ones(input_size))
self.input_size = input_size
def init_para(self):
self.W = torch.nn.Parameter(torch.ones(self.input_size))
def forward(self, input, A, sparse=False):
hidden = input @ torch.diag(self.W)
if sparse:
output = torch.sparse.mm(A, hidden)
else:
output = torch.matmul(A, hidden)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'device': 0}]
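# An equivalence note (a sketch, not from the repo): multiplying by
# torch.diag(self.W) just rescales each feature column by the matching entry of
# W, so the layer computes A @ (input * W); the fused kernel below materialises
# the diagonal matrix explicitly before the two matmuls.
_x = torch.rand(6, 4)
_A = torch.rand(6, 6)
_W = torch.rand(4)
assert torch.allclose(_A @ (_x @ torch.diag(_W)), _A @ (_x * _W), atol=1e-06)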
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from sklearn.metrics.pairwise import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_diag_embed_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0),
out=buf2)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
class GCNConv_diagNew(torch.nn.Module):
"""
    A GCN convolution layer using a diagonal weight matrix (per-feature scaling)
"""
def __init__(self, input_size, device):
super(GCNConv_diagNew, self).__init__()
self.W = torch.nn.Parameter(torch.ones(input_size))
self.input_size = input_size
def init_para(self):
self.W = torch.nn.Parameter(torch.ones(self.input_size))
def forward(self, input_0, input_1):
primals_1 = self.W
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
STK101/GRCN
|
GCNConv_diag
| false
| 17,921
|
[
"MIT"
] | 4
|
7389000a13d5969bcc77dc4cf73a4107acc68403
|
https://github.com/STK101/GRCN/tree/7389000a13d5969bcc77dc4cf73a4107acc68403
|
Balance_Theory
|
import torch
from typing import Optional
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from typing import Union
class Balance_Theory(nn.Module):
"""The signed graph clustering model with balance theory, restricted to 2 hops for fair comparison with SSSNET.
Args:
nfeat (int): Number of features.
hidden (int): Hidden dimensions of the initial MLP.
nclass (int): Number of clusters.
dropout (float): Dropout probability.
        hop (int): Number of hops to consider (must be 2).
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, nfeat: 'int', hidden: 'int', nclass: 'int', dropout:
'float', hop: 'int', directed: 'bool'=False, bias: 'bool'=True):
super(Balance_Theory, self).__init__()
nh1 = hidden
nh2 = hidden
self._num_clusters = int(nclass)
assert hop == 2, 'please only use 2 hops'
self._hop_p = 4
self._hop_n = 3
if bias:
self._bias = Parameter(torch.FloatTensor(self._num_clusters))
else:
self.register_parameter('_bias', None)
self._relu = nn.ReLU()
self._dropout = nn.Dropout(p=dropout)
self._undirected = not directed
if self._undirected:
self._w_p0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_p1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_n0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_n1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(2 * nh2, self.
_num_clusters))
self._reset_parameters_undirected()
else:
self._w_sp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(4 * nh2, self.
_num_clusters))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_p0, gain=1.414)
nn.init.xavier_uniform_(self._w_p1, gain=1.414)
nn.init.xavier_uniform_(self._w_n0, gain=1.414)
nn.init.xavier_uniform_(self._w_n1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_sp0, gain=1.414)
nn.init.xavier_uniform_(self._w_sp1, gain=1.414)
nn.init.xavier_uniform_(self._w_sn0, gain=1.414)
nn.init.xavier_uniform_(self._w_sn1, gain=1.414)
nn.init.xavier_uniform_(self._w_tp0, gain=1.414)
nn.init.xavier_uniform_(self._w_tp1, gain=1.414)
nn.init.xavier_uniform_(self._w_tn0, gain=1.414)
nn.init.xavier_uniform_(self._w_tn1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def forward(self, A_p:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', A_n:
'Union[torch.FloatTensor, torch.sparse_coo_tensor]', features:
'torch.FloatTensor', A_pt:
'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'=None,
A_nt: 'Optional[Union[torch.FloatTensor, torch.sparse_coo_tensor]]'
=None) ->Tuple[torch.FloatTensor, torch.FloatTensor, torch.
LongTensor, torch.FloatTensor]:
"""
Making a forward pass of the signed graph clustering model with balance theory.
Arg types:
* **A_p** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized positive part of the adjacency matrix.
* **A_n** (PyTorch FloatTensor or PyTorch sparse_coo_tensor) - Row-normalized negative part of the adjacency matrix.
* **features** (PyTorch FloatTensor) - Input node features, with shape (num_nodes, num_features).
* **A_pt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
positive part of the adjacency matrix. Default: None.
* **A_nt** (PyTorch FloatTensor or PyTorch sparse_coo_tensor, optional) - Transpose of column-normalized
negative part of the adjacency matrix. Default: None.
Return types:
* **z** (PyTorch FloatTensor) - Embedding matrix, with shape (num_nodes, 2*hidden) for undirected graphs
and (num_nodes, 4*hidden) for directed graphs.
            * **output** (PyTorch FloatTensor) - Log-probabilities of the cluster assignments, with shape (num_nodes, num_clusters).
* **predictions_cluster** (PyTorch LongTensor) - Predicted labels.
* **prob** (PyTorch FloatTensor) - Probability assignment matrix of different clusters, with shape (num_nodes, num_clusters).
"""
if self._undirected:
x_p = torch.mm(features, self._w_p0)
x_p = self._relu(x_p)
x_p = self._dropout(x_p)
x_p = torch.mm(x_p, self._w_p1)
x_n = torch.mm(features, self._w_n0)
x_n = self._relu(x_n)
x_n = self._dropout(x_n)
x_n = torch.mm(x_n, self._w_n1)
feat_p = self._w_p[0] * x_p
feat_n = torch.zeros_like(feat_p)
curr_p = x_p.clone()
curr_n_aux = x_n.clone()
j = 0
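            # Descriptive comment (not in the original source): the loop below aggregates
            # signed walks of length <= 2 following balance theory. feat_p collects terms
            # with an even number of negative edges (x_p, A_p x_p, A_p^2 x_p and, after the
            # loop, A_n^2 x_p), while feat_n collects terms with an odd number of negative
            # edges (A_n x_n, A_p A_n x_n, A_n A_p x_n); each term is scaled by a learnable
            # scalar from self._w_p / self._w_n.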
for h in range(0, self._hop_p - 1):
if h > 0:
curr_p = torch.matmul(A_p, curr_p)
curr_n_aux = torch.matmul(A_p, curr_n_aux)
feat_p += self._w_p[h] * curr_p
if h != self._hop_p - 2:
curr_n = torch.matmul(A_n, curr_n_aux)
feat_n += self._w_n[j] * curr_n
j += 1
for _ in range(self._hop_p - 3 - h):
curr_n = torch.matmul(A_p, curr_n)
feat_n += self._w_n[j] * curr_n
j += 1
feat_p += self._w_p[3] * torch.matmul(A_n, torch.matmul(A_n, x_p))
feat = torch.cat([feat_p, feat_n], dim=1)
else:
x_sp = torch.mm(features, self._w_sp0)
x_sp = self._relu(x_sp)
x_sp = self._dropout(x_sp)
x_sp = torch.mm(x_sp, self._w_sp1)
x_sn = torch.mm(features, self._w_sn0)
x_sn = self._relu(x_sn)
x_sn = self._dropout(x_sn)
x_sn = torch.mm(x_sn, self._w_sn1)
x_tp = torch.mm(features, self._w_tp0)
x_tp = self._relu(x_tp)
x_tp = self._dropout(x_tp)
x_tp = torch.mm(x_tp, self._w_tp1)
x_tn = torch.mm(features, self._w_tn0)
x_tn = self._relu(x_tn)
x_tn = self._dropout(x_tn)
x_tn = torch.mm(x_tn, self._w_tn1)
A_sp = A_p
A_sn = A_n
A_tp = A_pt
A_tn = A_nt
feat_sp = self._w_sp[0] * x_sp
feat_sn = torch.zeros_like(feat_sp)
feat_tp = self._w_tp[0] * x_tp
feat_tn = torch.zeros_like(feat_tp)
curr_sp = x_sp.clone()
curr_sn_aux = x_sn.clone()
curr_tp = x_tp.clone()
curr_tn_aux = x_tn.clone()
j = 0
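            # Descriptive comment (not in the original source): same balance-theory
            # aggregation as the undirected branch, carried out separately for the source
            # embeddings (using A_sp/A_sn) and the target embeddings (using A_tp/A_tn,
            # the transposed, column-normalized parts).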
for h in range(0, self._hop_p - 1):
if h > 0:
curr_sp = torch.matmul(A_sp, curr_sp)
curr_sn_aux = torch.matmul(A_sp, curr_sn_aux)
curr_tp = torch.matmul(A_tp, curr_tp)
curr_tn_aux = torch.matmul(A_tp, curr_tn_aux)
feat_sp += self._w_sp[h] * curr_sp
feat_tp += self._w_tp[h] * curr_tp
if h != self._hop_p - 2:
curr_sn = torch.matmul(A_sn, curr_sn_aux)
curr_tn = torch.matmul(A_tn, curr_tn_aux)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
for _ in range(self._hop_p - 3 - h):
curr_sn = torch.matmul(A_sp, curr_sn)
curr_tn = torch.matmul(A_tp, curr_tn)
feat_sn += self._w_sn[j] * curr_sn
feat_tn += self._w_tn[j] * curr_tn
j += 1
feat_sp += self._w_sp[3] * torch.matmul(A_sn, torch.matmul(A_sn,
x_sp))
feat_tp += self._w_tp[3] * torch.matmul(A_tn, torch.matmul(A_tn,
x_tp))
feat = torch.cat([feat_sp, feat_sn, feat_tp, feat_tn], dim=1)
z = feat
output = torch.mm(z, self._W_prob)
if self._bias is not None:
output = output + self._bias
predictions_cluster = torch.argmax(output, dim=1)
prob = F.softmax(output, dim=1)
output = F.log_softmax(output, dim=1)
return F.normalize(z), output, predictions_cluster, prob
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'hidden': 4, 'nclass': 4, 'dropout': 0.5,
'hop': 2}]
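# Minimal usage sketch (not part of the original module): runs the eager model on
# the toy dense inputs defined above; eval() disables dropout so the pass is
# reproducible for a fixed seed.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = Balance_Theory(nfeat=4, hidden=4, nclass=4, dropout=0.5, hop=2)
    model.eval()
    A_p, A_n, features = get_inputs()
    z, log_prob, predictions, prob = model(A_p, A_n, features)
    print(z.shape, log_prob.shape, predictions.shape, prob.shape)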
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
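# Kernel 0: element-wise in-place ReLU; launched twice, once per hidden MLP
# activation (the eager model's dropout does not appear in this trace, which
# suggests it was captured with dropout inactive).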
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
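# Kernel 1: fuses the per-hop weighted sums for the positive and negative walk
# features, their concatenation into the 8-dim embedding row, and the L2
# row-normalization (norm clamped at 1e-12) of F.normalize; it also writes out
# the unnormalized embedding and the row norms.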
@triton.jit
def triton_per_fused_cat_div_linalg_vector_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr0 + 3)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp29 = tl.load(in_ptr5 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp33 = tl.load(in_ptr5 + 1)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp38 = tl.load(in_ptr5 + 2)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp11 = tl.load(in_ptr2 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tl.load(in_ptr3 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tl.load(in_ptr4 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp31 = tl.load(in_ptr6 + (4 * x0 + (-4 + r1)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp30 * tmp31
tmp35 = tl.load(in_ptr7 + (4 * x0 + (-4 + r1)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 * tmp35
tmp37 = tmp32 + tmp36
tmp40 = tl.load(in_ptr8 + (4 * x0 + (-4 + r1)), tmp26 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp37 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp26, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp25, tmp44)
tmp46 = tmp45 * tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.where(xmask, tmp47, 0)
tmp50 = tl.sum(tmp49, 1)[:, None]
tmp51 = libdevice.sqrt(tmp50)
tmp52 = 1e-12
tmp53 = triton_helpers.maximum(tmp51, tmp52)
tmp54 = tmp45 / tmp53
tl.store(out_ptr0 + (r1 + 8 * x0), tmp45, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp51, xmask)
tl.store(out_ptr1 + (r1 + 8 * x0), tmp54, xmask)
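# Kernel 2: row-wise argmax over the four cluster logits via NaN-aware
# comparison chains; this produces predictions_cluster.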
@triton.jit
def triton_poi_fused_argmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
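# Kernel 3: subtracts the row-wise maximum from the logits, the shared
# numerically stable prologue for both softmax and log_softmax.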
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
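# Kernel 4: from the max-shifted logits, computes the softmax probabilities and
# the log_softmax output in a single pass.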
@triton.jit
def triton_poi_fused__log_softmax__softmax_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tmp14 = tl_math.log(tmp12)
tmp15 = tmp0 - tmp14
tl.store(out_ptr0 + x2, tmp13, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
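# call() stitches the cuBLAS matmuls (extern_kernels.mm/addmm) and the fused
# kernels above into the complete forward pass of the undirected model on
# CUDA device 0.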
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (3, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_4, out=buf3)
del primals_4
buf4 = buf3
del buf3
triton_poi_fused_relu_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, primals_5, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf2, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_9, buf5, out=buf9)
buf10 = buf5
del buf5
extern_kernels.mm(primals_7, buf9, out=buf10)
buf11 = buf9
del buf9
extern_kernels.mm(primals_9, buf8, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf2, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, buf12, out=buf13)
buf14 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1), (1, 1), 0)
del buf20
buf22 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_per_fused_cat_div_linalg_vector_norm_1[grid(4)](buf21,
primals_6, buf2, buf8, buf11, buf13, primals_8, buf6, buf7,
buf10, buf14, buf22, 4, 8, XBLOCK=1, num_warps=2, num_stages=1)
buf15 = buf12
del buf12
extern_kernels.addmm(primals_11, buf14, primals_10, alpha=1, beta=1,
out=buf15)
del primals_11
buf16 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_argmax_2[grid(4)](buf15, buf16, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf15, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf18 = buf15
del buf15
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_4[grid(16)](buf17, buf18,
buf19, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf17
return (buf22, buf19, buf16, buf18, primals_6, primals_8, buf1, buf2,
buf4, buf6, buf7, buf8, buf10, buf11, buf13, buf14, buf18, buf19,
buf21, reinterpret_tensor(primals_10, (4, 8), (1, 4), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0))
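# The first four tensors returned by call() appear to map back to the eager
# outputs: buf22 is the normalized embedding z, buf19 the log_softmax output,
# buf16 the argmax cluster predictions and buf18 the softmax probabilities; the
# remaining tensors are retained for the backward pass.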
class Balance_TheoryNew(nn.Module):
"""The signed graph clustering model with balance theory, restricted to 2 hops for fair comparison with SSSNET.
Args:
nfeat (int): Number of features.
hidden (int): Hidden dimensions of the initial MLP.
nclass (int): Number of clusters.
dropout (float): Dropout probability.
        hop (int): Number of hops to consider. (needs to be 2)
directed (bool, optional): Whether the input network is directed or not. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, nfeat: 'int', hidden: 'int', nclass: 'int', dropout:
'float', hop: 'int', directed: 'bool'=False, bias: 'bool'=True):
super(Balance_TheoryNew, self).__init__()
nh1 = hidden
nh2 = hidden
self._num_clusters = int(nclass)
assert hop == 2, 'please only use 2 hops'
self._hop_p = 4
self._hop_n = 3
if bias:
self._bias = Parameter(torch.FloatTensor(self._num_clusters))
else:
self.register_parameter('_bias', None)
self._relu = nn.ReLU()
self._dropout = nn.Dropout(p=dropout)
self._undirected = not directed
if self._undirected:
self._w_p0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_p1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_n0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_n1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_p = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_n = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(2 * nh2, self.
_num_clusters))
self._reset_parameters_undirected()
else:
self._w_sp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_sn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tp0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tp1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_tn0 = Parameter(torch.FloatTensor(nfeat, nh1))
self._w_tn1 = Parameter(torch.FloatTensor(nh1, nh2))
self._w_sp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_sn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._w_tp = Parameter(torch.FloatTensor(self._hop_p, 1))
self._w_tn = Parameter(torch.FloatTensor(self._hop_n, 1))
self._W_prob = Parameter(torch.FloatTensor(4 * nh2, self.
_num_clusters))
self._reset_parameters_directed()
def _reset_parameters_undirected(self):
self._w_p.data.fill_(1.0)
self._w_n.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_p0, gain=1.414)
nn.init.xavier_uniform_(self._w_p1, gain=1.414)
nn.init.xavier_uniform_(self._w_n0, gain=1.414)
nn.init.xavier_uniform_(self._w_n1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
def _reset_parameters_directed(self):
self._w_sp.data.fill_(1.0)
self._w_sn.data.fill_(1.0)
self._w_tp.data.fill_(1.0)
self._w_tn.data.fill_(1.0)
nn.init.xavier_uniform_(self._w_sp0, gain=1.414)
nn.init.xavier_uniform_(self._w_sp1, gain=1.414)
nn.init.xavier_uniform_(self._w_sn0, gain=1.414)
nn.init.xavier_uniform_(self._w_sn1, gain=1.414)
nn.init.xavier_uniform_(self._w_tp0, gain=1.414)
nn.init.xavier_uniform_(self._w_tp1, gain=1.414)
nn.init.xavier_uniform_(self._w_tn0, gain=1.414)
nn.init.xavier_uniform_(self._w_tn1, gain=1.414)
if self._bias is not None:
self._bias.data.fill_(0.0)
nn.init.xavier_uniform_(self._W_prob, gain=1.414)
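    # Note (added): forward() below dispatches to the Inductor-generated call()
    # and returns the same four outputs as the eager Balance_Theory
    # (normalized embedding, log-probabilities, predicted clusters, probabilities).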
def forward(self, input_0, input_1, input_2):
primals_11 = self._bias
primals_1 = self._w_p0
primals_2 = self._w_p1
primals_3 = self._w_n0
primals_4 = self._w_n1
primals_6 = self._w_p
primals_8 = self._w_n
primals_10 = self._W_prob
primals_5 = input_0
primals_7 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2], output[3]
|
SherylHYX/SSSNET_Signed_Clustering
|
Balance_Theory
| false
| 17,922
|
[
"MIT"
] | 5
|
85736c18e86b396d64177d22b8c7f9859dfd794c
|
https://github.com/SherylHYX/SSSNET_Signed_Clustering/tree/85736c18e86b396d64177d22b8c7f9859dfd794c
|