| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
NonLinearProbe3
|
import torch
from torch import nn
class NonLinearProbe3(nn.Module):
def __init__(self, input_dim, num_classes=255):
super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=num_classes)
self.sigmoid = nn.Sigmoid()
def forward(self, feature_vectors):
return self.sigmoid(self.linear(feature_vectors))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 255
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (255, 4), (4, 1))
assert_size_stride(primals_2, (255,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 255), (255, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 255), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 255), (4080, 1020, 255, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(16320)](buf1, primals_2, 16320,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class NonLinearProbe3New(nn.Module):
def __init__(self, input_dim, num_classes=255):
super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=num_classes)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
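# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and that the eager NonLinearProbe3 from the previous cell is in
# scope. The generated module should reproduce sigmoid(linear(x)).
def _check_nonlinearprobe3():
    ref = NonLinearProbe3(input_dim=4).cuda()
    opt = NonLinearProbe3New(input_dim=4).cuda()
    opt.load_state_dict(ref.state_dict())  # share weight and bias
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-5)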
|
PAL-ML/atari-representation-learning
|
NonLinearProbe3
| false
| 2,802
|
[
"MIT"
] | 0
|
11977da174d9ef74c0b2333322b9f0b28e15239e
|
https://github.com/PAL-ML/atari-representation-learning/tree/11977da174d9ef74c0b2333322b9f0b28e15239e
|
NonLinearProbe1
|
import torch
from torch import nn
class NonLinearProbe1(nn.Module):
def __init__(self, input_dim, num_classes=255):
super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=num_classes)
self.relu = nn.ReLU()
def forward(self, feature_vectors):
return self.relu(self.linear(feature_vectors))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 255
x2 = xindex % 4080
x3 = xindex // 4080
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 4096 * x3), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (255, 4), (4, 1))
assert_size_stride(primals_2, (255,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 255), (255, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 255), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 255), (4080, 1020, 255, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 255), (4096, 1020, 255, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16320)](buf1,
primals_2, buf2, 16320, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class NonLinearProbe1New(nn.Module):
def __init__(self, input_dim, num_classes=255):
super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=num_classes)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
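# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager NonLinearProbe1 above. output[0] is the ReLU
# activation; the boolean buf2 mask is only saved for the backward pass.
def _check_nonlinearprobe1():
    ref = NonLinearProbe1(input_dim=4).cuda()
    opt = NonLinearProbe1New(input_dim=4).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-5)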
|
PAL-ML/atari-representation-learning
|
NonLinearProbe1
| false
| 2,803
|
[
"MIT"
] | 0
|
11977da174d9ef74c0b2333322b9f0b28e15239e
|
https://github.com/PAL-ML/atari-representation-learning/tree/11977da174d9ef74c0b2333322b9f0b28e15239e
|
L2N
|
import torch
import torch.nn as nn
def l2n(x, eps=1e-06):
return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x)
class L2N(nn.Module):
def __init__(self, eps=1e-06):
super(L2N, self).__init__()
self.eps = eps
def forward(self, x):
return l2n(x, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + '(' + 'eps=' + str(self.eps) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def l2n(x, eps=1e-06):
return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x)
class L2NNew(nn.Module):
def __init__(self, eps=1e-06):
super(L2NNew, self).__init__()
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(' + 'eps=' + str(self.eps) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
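# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device. The fused kernel normalizes each position by the L2 norm taken
# over dim=1, so it should match the eager l2n above.
def _check_l2n():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(L2NNew()(x), l2n(x), atol=1e-5)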
|
SamSweere/CV-PIRE
|
L2N
| false
| 2,804
|
[
"MIT"
] | 0
|
d857167b3058cb51d10662150c6a4ba3c85f2903
|
https://github.com/SamSweere/CV-PIRE/tree/d857167b3058cb51d10662150c6a4ba3c85f2903
|
InstanceNorm2d
|
import torch
from torch import nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
import torch.utils.data
from torch.utils import data as data
from torch import autograd as autograd
class InstanceNorm2d(nn.Module):
def __init__(self, epsilon=1e-08, **kwargs):
super().__init__(**kwargs)
self.epsilon = epsilon
def forward(self, x):
tmp = torch.mul(x, x)
tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
return x * tmp
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
import torch.utils.data
from torch.utils import data as data
from torch import autograd as autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_rsqrt_0(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp0 * tmp10
tl.store(out_ptr1 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_rsqrt_0[grid(16)](arg0_1, buf1, 16,
16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class InstanceNorm2dNew(nn.Module):
def __init__(self, epsilon=1e-08, **kwargs):
super().__init__(**kwargs)
self.epsilon = epsilon
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
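# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager InstanceNorm2d above. One persistent reduction
# computes mean(x*x) over dims (2, 3) and rescales by its rsqrt.
def _check_instance_norm():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(InstanceNorm2dNew()(x), InstanceNorm2d()(x),
        atol=1e-5)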
|
Lotayou/BasicSR
|
InstanceNorm2d
| false
| 2,805
|
[
"Apache-2.0",
"MIT"
] | 0
|
6cf9a706dd680d54f7dc26e87318ff79f76c0dbf
|
https://github.com/Lotayou/BasicSR/tree/6cf9a706dd680d54f7dc26e87318ff79f76c0dbf
|
BCE_disc_sm_v2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v2(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v2, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, x, labels):
assert (x >= 0).all() and (x <= 1).all(), 'x is wrong'
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = (1 - self.lb_sm) * labels + self.lb_sm * (1 - labels)
loss = F.binary_cross_entropy(x, labels, weight=self.weight_list,
reduction='none')
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_binary_cross_entropy_mean_mul_rsub_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp19 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp25 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp35 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp41 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp51 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp57 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 0.8
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = 0.2
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp8 = tmp7 - tmp3
tmp10 = -tmp9
tmp11 = libdevice.log1p(tmp10)
tmp12 = -100.0
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp8 * tmp13
tmp15 = tl_math.log(tmp9)
tmp16 = triton_helpers.maximum(tmp15, tmp12)
tmp17 = tmp7 * tmp16
tmp18 = tmp14 - tmp17
tmp20 = tmp19 * tmp1
tmp21 = tmp3 - tmp19
tmp22 = tmp21 * tmp5
tmp23 = tmp20 + tmp22
tmp24 = tmp23 - tmp3
tmp26 = -tmp25
tmp27 = libdevice.log1p(tmp26)
tmp28 = triton_helpers.maximum(tmp27, tmp12)
tmp29 = tmp24 * tmp28
tmp30 = tl_math.log(tmp25)
tmp31 = triton_helpers.maximum(tmp30, tmp12)
tmp32 = tmp23 * tmp31
tmp33 = tmp29 - tmp32
tmp34 = tmp18 + tmp33
tmp36 = tmp35 * tmp1
tmp37 = tmp3 - tmp35
tmp38 = tmp37 * tmp5
tmp39 = tmp36 + tmp38
tmp40 = tmp39 - tmp3
tmp42 = -tmp41
tmp43 = libdevice.log1p(tmp42)
tmp44 = triton_helpers.maximum(tmp43, tmp12)
tmp45 = tmp40 * tmp44
tmp46 = tl_math.log(tmp41)
tmp47 = triton_helpers.maximum(tmp46, tmp12)
tmp48 = tmp39 * tmp47
tmp49 = tmp45 - tmp48
tmp50 = tmp34 + tmp49
tmp52 = tmp51 * tmp1
tmp53 = tmp3 - tmp51
tmp54 = tmp53 * tmp5
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp3
tmp58 = -tmp57
tmp59 = libdevice.log1p(tmp58)
tmp60 = triton_helpers.maximum(tmp59, tmp12)
tmp61 = tmp56 * tmp60
tmp62 = tl_math.log(tmp57)
tmp63 = triton_helpers.maximum(tmp62, tmp12)
tmp64 = tmp55 * tmp63
tmp65 = tmp61 - tmp64
tmp66 = tmp50 + tmp65
tmp67 = 4.0
tmp68 = tmp66 / tmp67
tl.store(out_ptr0 + x0, tmp68, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_binary_cross_entropy_mean_mul_rsub_0[grid(64)](
arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BCE_disc_sm_v2New(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v2New, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
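# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager BCE_disc_sm_v2 above. Both inputs must lie in
# [0, 1]; the kernel fuses label smoothing, the BCE terms (with the same
# -100 clamp as F.binary_cross_entropy), and the mean over dim=0.
def _check_bce_v2():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(BCE_disc_sm_v2New()(x, y), BCE_disc_sm_v2()(x, y),
        atol=1e-5)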
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v2
| false
| 2,806
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
|
SelfAttention0
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"""Take in model size and number of heads."""
super(MultiHeadedAttention, self).__init__()
self.d_k = d_model // h
self.h = h
self.W_q = nn.Linear(d_model, self.h * self.d_k)
self.W_k = nn.Linear(d_model, self.h * self.d_k)
self.W_v = nn.Linear(d_model, self.h * self.d_k)
self.W_o = nn.Linear(self.h * self.d_k, d_model, bias=False)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def __attention(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0,
-1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
assert key.dim() == 3 and query.dim() == 3 and value.dim() == 3
batch_size = query.size(0)
query = self.W_q(query).view(batch_size, -1, self.h, self.d_k)
key = self.W_k(key).view(batch_size, -1, self.h, self.d_k)
value = self.W_v(value).view(batch_size, -1, self.h, self.d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
        x, self.attn = self.__attention(query, key, value,
            dropout=self.dropout, mask=mask)
        x = x.transpose(1, 2).contiguous().view(
            batch_size, -1, self.h * self.d_k)
x = self.W_o(x)
return x
class SelfAttention0(nn.Module):
"""
Self attention
Permutation EQUIvariant
"""
def __init__(self, heads, d_model, dropout=0.0):
super(SelfAttention0, self).__init__()
self.size = d_model
self.attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.sublayer = lambda x: self.attn(x, x, x)
def forward(self, x):
assert x.dim() == 3
"""Apply residual connection to any sublayer with the _same size_."""
return self.sublayer(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'd_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
        return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf7,
            reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7,
            reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8,
            reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0),
            reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
            reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"""Take in model size and number of heads."""
super(MultiHeadedAttention, self).__init__()
self.d_k = d_model // h
self.h = h
self.W_q = nn.Linear(d_model, self.h * self.d_k)
self.W_k = nn.Linear(d_model, self.h * self.d_k)
self.W_v = nn.Linear(d_model, self.h * self.d_k)
self.W_o = nn.Linear(self.h * self.d_k, d_model, bias=False)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def __attention(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0,
-1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
assert key.dim() == 3 and query.dim() == 3 and value.dim() == 3
batch_size = query.size(0)
query = self.W_q(query).view(batch_size, -1, self.h, self.d_k)
key = self.W_k(key).view(batch_size, -1, self.h, self.d_k)
value = self.W_v(value).view(batch_size, -1, self.h, self.d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
        x, self.attn = self.__attention(query, key, value,
            dropout=self.dropout, mask=mask)
        x = x.transpose(1, 2).contiguous().view(
            batch_size, -1, self.h * self.d_k)
x = self.W_o(x)
return x
class SelfAttention0New(nn.Module):
"""
Self attention
Permutation EQUIvariant
"""
def __init__(self, heads, d_model, dropout=0.0):
super(SelfAttention0New, self).__init__()
self.size = d_model
self.attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.sublayer = lambda x: self.attn(x, x, x)
def forward(self, input_0):
primals_2 = self.attn.W_q.weight
primals_3 = self.attn.W_q.bias
primals_4 = self.attn.W_k.weight
primals_5 = self.attn.W_k.bias
primals_6 = self.attn.W_v.weight
primals_7 = self.attn.W_v.bias
primals_8 = self.attn.W_o.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
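# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager SelfAttention0 above. With dropout=0.0 the two
# versions should agree once they share parameters.
def _check_self_attention0():
    ref = SelfAttention0(heads=4, d_model=4).cuda()
    opt = SelfAttention0New(heads=4, d_model=4).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-5)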
|
SSussexGit/deepikachu
|
SelfAttention0
| false
| 2,807
|
[
"MIT"
] | 0
|
72999c4a3f1767c3e5f332fe64cba9240ef43a79
|
https://github.com/SSussexGit/deepikachu/tree/72999c4a3f1767c3e5f332fe64cba9240ef43a79
|
SelfAttention
|
import torch
from torch import nn
import torch.nn.functional as F
def conv1d(ni: 'int', no: 'int', ks: 'int'=1, stride: 'int'=1,
        padding: 'int'=0, bias: 'bool'=True):
"""
Create and initialize a `nn.Conv1d` layer with spectral normalization.
"""
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias:
conv.bias.data.zero_()
return conv
class SelfAttention(nn.Module):
"""
# self-attention implementation from https://github.com/fastai/fastai/blob/5c51f9eabf76853a89a9bc5741804d2ed4407e49/fastai/layers.py
Self attention layer for nd
"""
def __init__(self, n_channels: 'int', div):
super(SelfAttention, self).__init__()
if n_channels > 1:
self.query = conv1d(n_channels, n_channels // div)
self.key = conv1d(n_channels, n_channels // div)
else:
self.query = conv1d(n_channels, n_channels)
self.key = conv1d(n_channels, n_channels)
self.value = conv1d(n_channels, n_channels)
self.gamma = nn.Parameter(torch.tensor([0.0]))
def forward(self, x):
size = x.size()
x = x.view(*size[:2], -1)
f, g, h = self.query(x), self.key(x), self.value(x)
beta = F.softmax(torch.bmm(f.permute(0, 2, 1).contiguous(), g), dim=1)
o = self.gamma * torch.bmm(h, beta) + x
return o.view(*size).contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'div': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 256 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 256
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
4, 16), (64, 16, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (4, 1, 16), (16, 16, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
4, 16), (64, 16, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (4, 1, 16), (16, 16, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
4, 16), (64, 16, 1), 0), primals_6, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (4, 4, 16), (64, 16, 1))
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf3, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf4 = buf0
del buf0
triton_poi_fused_convolution_1[grid(64)](buf4, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf5 = reinterpret_tensor(buf1, (4, 1, 16), (16, 64, 1), 0)
del buf1
triton_poi_fused_convolution_1[grid(64)](buf5, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 16, 1), (16, 1, 0),
0), buf5, out=buf6)
buf7 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32)
buf8 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32)
triton_per_fused__softmax_2[grid(64)](buf6, buf7, buf8, 64, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf9 = buf6
del buf6
triton_poi_fused__softmax_3[grid(1024)](buf9, buf7, buf8, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del buf8
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(buf3, buf9, out=buf10)
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(256)](primals_8, buf10, primals_1,
buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
        return (reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            primals_2, primals_4, primals_6, primals_8,
            reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0),
            buf9, buf10, reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0),
            buf4, reinterpret_tensor(buf5, (4, 16, 1), (16, 1, 16), 0))
def conv1d(ni: 'int', no: 'int', ks: 'int'=1, stride: 'int'=1,
        padding: 'int'=0, bias: 'bool'=True):
"""
Create and initialize a `nn.Conv1d` layer with spectral normalization.
"""
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias:
conv.bias.data.zero_()
return conv
class SelfAttentionNew(nn.Module):
"""
# self-attention implementation from https://github.com/fastai/fastai/blob/5c51f9eabf76853a89a9bc5741804d2ed4407e49/fastai/layers.py
Self attention layer for nd
"""
def __init__(self, n_channels: 'int', div):
super(SelfAttentionNew, self).__init__()
if n_channels > 1:
self.query = conv1d(n_channels, n_channels // div)
self.key = conv1d(n_channels, n_channels // div)
else:
self.query = conv1d(n_channels, n_channels)
self.key = conv1d(n_channels, n_channels)
self.value = conv1d(n_channels, n_channels)
self.gamma = nn.Parameter(torch.tensor([0.0]))
def forward(self, input_0):
        # Mapping follows the shape asserts and usage in `call`: primals_2/3
        # are the query conv, primals_4/5 the key conv, primals_6/7 the value
        # conv, and primals_8 is the gamma scale.
        primals_2 = self.query.weight
        primals_3 = self.query.bias
        primals_4 = self.key.weight
        primals_5 = self.key.bias
        primals_6 = self.value.weight
        primals_7 = self.value.bias
        primals_8 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
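# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager SelfAttention above. gamma is initialized to 0,
# which would hide the attention branch, so set it to a nonzero value first.
def _check_self_attention():
    ref = SelfAttention(n_channels=4, div=4).cuda()
    with torch.no_grad():
        ref.gamma.fill_(0.5)
    opt = SelfAttentionNew(n_channels=4, div=4).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-5)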
|
STRCSussex-UbiCompSiegen/dl_har_model
|
SelfAttention
| false
| 2,808
|
[
"MIT"
] | 0
|
caac0f87fc7dd08a5d6ad3e4455ee25b35f5e7b4
|
https://github.com/STRCSussex-UbiCompSiegen/dl_har_model/tree/caac0f87fc7dd08a5d6ad3e4455ee25b35f5e7b4
|
MSE_disc
|
import torch
import torch.nn as nn
class MSE_disc(nn.Module):
def __init__(self, weight_list=None):
super(MSE_disc, self).__init__()
self.weight_list = weight_list
def forward(self, x, labels):
loss = (x - labels) ** 2
if self.weight_list is not None:
loss = loss * self.weight_list
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp5 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp10 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp14 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp15 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tl.store(out_ptr0 + x0, tmp20, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_pow_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MSE_discNew(nn.Module):
def __init__(self, weight_list=None):
super(MSE_discNew, self).__init__()
self.weight_list = weight_list
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
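# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager MSE_disc above. With weight_list=None the kernel
# reduces (x - labels) ** 2 over dim=0 in a single fused pass.
def _check_mse_disc():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(MSE_discNew()(x, y), MSE_disc()(x, y), atol=1e-5)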
|
Sampson-Lee/SIB-Net
|
MSE_disc
| false
| 2,809
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
|
QNetwork
|
import torch
from torch import nn
import torch.nn.functional as F
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class QNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(QNetwork, self).__init__()
self.fc1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, 1)
self.fc4 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.fc5 = nn.Linear(hidden_dim, hidden_dim)
self.fc6 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
x1 = F.relu(self.fc1(sa))
x1 = F.relu(self.fc2(x1))
x1 = self.fc3(x1)
x2 = F.relu(self.fc4(sa))
x2 = F.relu(self.fc5(x2))
x2 = self.fc6(x2)
return x1, x2
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (1, 4), (4, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4),
            (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4),
            (1, 4), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 4),
            (1, 8), 0), out=buf7)
del primals_9
buf8 = buf7
del buf7
triton_poi_fused_relu_1[grid(16)](buf8, primals_10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4),
            (1, 4), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
primals_11, primals_7, primals_5)
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class QNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(QNetworkNew, self).__init__()
self.fc1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, 1)
self.fc4 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.fc5 = nn.Linear(hidden_dim, hidden_dim)
self.fc6 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, input_0, input_1):
        # Mapping follows the shape asserts and usage in `call`: the cat
        # kernel concatenates primals_1/primals_2, so those must be the
        # state/action inputs, and primals_5/primals_11 are the fc2/fc5
        # weights consumed by the mm calls.
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.fc3.weight
        primals_8 = self.fc3.bias
        primals_9 = self.fc4.weight
        primals_10 = self.fc4.bias
        primals_11 = self.fc5.weight
        primals_12 = self.fc5.bias
        primals_13 = self.fc6.weight
        primals_14 = self.fc6.bias
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
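# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager QNetwork above. `call` returns both Q-heads
# (buf6, buf12), so compare each against the reference.
def _check_qnetwork():
    ref = QNetwork(num_inputs=4, num_actions=4, hidden_dim=4).cuda()
    opt = QNetworkNew(num_inputs=4, num_actions=4, hidden_dim=4).cuda()
    opt.load_state_dict(ref.state_dict())
    s = torch.rand(4, 4, device='cuda')
    a = torch.rand(4, 4, device='cuda')
    for q_ref, q_opt in zip(ref(s, a), opt(s, a)):
        assert torch.allclose(q_ref, q_opt, atol=1e-5)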
|
SINGROUP/Atom_manipulation_with_RL
|
QNetwork
| false
| 2,810
|
[
"MIT"
] | 0
|
428e05459ed395f1a5fc00a7c65a9b0c26210ee8
|
https://github.com/SINGROUP/Atom_manipulation_with_RL/tree/428e05459ed395f1a5fc00a7c65a9b0c26210ee8
|
BCE_disc_sm_v6
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v6(nn.Module):
def __init__(self, weight_list=None, lb_sm1=0.5, lb_sm0=0.1):
super(BCE_disc_sm_v6, self).__init__()
self.weight_list = weight_list
self.lb_sm1 = lb_sm1
self.lb_sm0 = lb_sm0
def forward(self, x, labels):
assert (x >= 0).all() and (x <= 1).all(), 'x is wrong'
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = self.lb_sm1 * labels + self.lb_sm0 * (1 - labels)
loss = F.binary_cross_entropy(x, labels, weight=self.weight_list,
reduction='none')
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_binary_cross_entropy_mean_mul_rsub_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp19 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp25 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp35 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp41 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp51 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp57 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = 0.1
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp8 = tmp7 - tmp3
tmp10 = -tmp9
tmp11 = libdevice.log1p(tmp10)
tmp12 = -100.0
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp8 * tmp13
tmp15 = tl_math.log(tmp9)
tmp16 = triton_helpers.maximum(tmp15, tmp12)
tmp17 = tmp7 * tmp16
tmp18 = tmp14 - tmp17
tmp20 = tmp19 * tmp1
tmp21 = tmp3 - tmp19
tmp22 = tmp21 * tmp5
tmp23 = tmp20 + tmp22
tmp24 = tmp23 - tmp3
tmp26 = -tmp25
tmp27 = libdevice.log1p(tmp26)
tmp28 = triton_helpers.maximum(tmp27, tmp12)
tmp29 = tmp24 * tmp28
tmp30 = tl_math.log(tmp25)
tmp31 = triton_helpers.maximum(tmp30, tmp12)
tmp32 = tmp23 * tmp31
tmp33 = tmp29 - tmp32
tmp34 = tmp18 + tmp33
tmp36 = tmp35 * tmp1
tmp37 = tmp3 - tmp35
tmp38 = tmp37 * tmp5
tmp39 = tmp36 + tmp38
tmp40 = tmp39 - tmp3
tmp42 = -tmp41
tmp43 = libdevice.log1p(tmp42)
tmp44 = triton_helpers.maximum(tmp43, tmp12)
tmp45 = tmp40 * tmp44
tmp46 = tl_math.log(tmp41)
tmp47 = triton_helpers.maximum(tmp46, tmp12)
tmp48 = tmp39 * tmp47
tmp49 = tmp45 - tmp48
tmp50 = tmp34 + tmp49
tmp52 = tmp51 * tmp1
tmp53 = tmp3 - tmp51
tmp54 = tmp53 * tmp5
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp3
tmp58 = -tmp57
tmp59 = libdevice.log1p(tmp58)
tmp60 = triton_helpers.maximum(tmp59, tmp12)
tmp61 = tmp56 * tmp60
tmp62 = tl_math.log(tmp57)
tmp63 = triton_helpers.maximum(tmp62, tmp12)
tmp64 = tmp55 * tmp63
tmp65 = tmp61 - tmp64
tmp66 = tmp50 + tmp65
tmp67 = 4.0
tmp68 = tmp66 / tmp67
tl.store(out_ptr0 + x0, tmp68, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_binary_cross_entropy_mean_mul_rsub_0[grid(64)](
arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BCE_disc_sm_v6New(nn.Module):
def __init__(self, weight_list=None, lb_sm1=0.5, lb_sm0=0.1):
super(BCE_disc_sm_v6New, self).__init__()
self.weight_list = weight_list
self.lb_sm1 = lb_sm1
self.lb_sm0 = lb_sm0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
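# Illustrative smoke test (a sketch, not part of the dataset row): assumes a
# CUDA device and the eager BCE_disc_sm_v6 above; identical to the v2 check
# except for the lb_sm1=0.5 / lb_sm0=0.1 smoothing constants baked into the
# kernel.
def _check_bce_v6():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(BCE_disc_sm_v6New()(x, y), BCE_disc_sm_v6()(x, y),
        atol=1e-5)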
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v6
| false
| 2,811
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
|
MultiHeadedAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"""Take in model size and number of heads."""
super(MultiHeadedAttention, self).__init__()
self.d_k = d_model // h
self.h = h
self.W_q = nn.Linear(d_model, self.h * self.d_k)
self.W_k = nn.Linear(d_model, self.h * self.d_k)
self.W_v = nn.Linear(d_model, self.h * self.d_k)
self.W_o = nn.Linear(self.h * self.d_k, d_model, bias=False)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def __attention(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0,
-1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
assert key.dim() == 3 and query.dim() == 3 and value.dim() == 3
batch_size = query.size(0)
query = self.W_q(query).view(batch_size, -1, self.h, self.d_k)
key = self.W_k(key).view(batch_size, -1, self.h, self.d_k)
value = self.W_v(value).view(batch_size, -1, self.h, self.d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
        x, self.attn = self.__attention(query, key, value,
            dropout=self.dropout, mask=mask)
        x = x.transpose(1, 2).contiguous().view(
            batch_size, -1, self.h * self.d_k)
x = self.W_o(x)
return x
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'h': 4, 'd_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf1)
del primals_6
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_5, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_7, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_9, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11)
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadedAttentionNew(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"""Take in model size and number of heads."""
super(MultiHeadedAttentionNew, self).__init__()
self.d_k = d_model // h
self.h = h
self.W_q = nn.Linear(d_model, self.h * self.d_k)
self.W_k = nn.Linear(d_model, self.h * self.d_k)
self.W_v = nn.Linear(d_model, self.h * self.d_k)
self.W_o = nn.Linear(self.h * self.d_k, d_model, bias=False)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def __attention(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0,
-1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, input_0, input_1, input_2):
primals_4 = self.W_q.weight
primals_5 = self.W_q.bias
primals_6 = self.W_k.weight
primals_7 = self.W_k.bias
primals_8 = self.W_v.weight
primals_9 = self.W_v.bias
primals_10 = self.W_o.weight
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
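# Illustrative usage sketch (an assumption, not part of the original repo): the
# compiled call() asserts three [4, 4, 4] CUDA inputs, so the module is exercised
# with exactly those shapes; the variable names here are hypothetical.
mha = MultiHeadedAttentionNew(h=4, d_model=4).cuda()
q = k = v = torch.rand(4, 4, 4, device='cuda')
out = mha(q, k, v)  # W_o projection of the attended values, shape [4, 4, 4]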
|
SSussexGit/deepikachu
|
MultiHeadedAttention
| false
| 2,812
|
[
"MIT"
] | 0
|
72999c4a3f1767c3e5f332fe64cba9240ef43a79
|
https://github.com/SSussexGit/deepikachu/tree/72999c4a3f1767c3e5f332fe64cba9240ef43a79
|
InnerProductModel
|
import torch
class InnerProductModel(torch.nn.Module):
@staticmethod
def is_valid_model_type(model_type):
raise NotImplementedError
@staticmethod
def get_model_from_type(model_type):
raise NotImplementedError
@property
def loss_criterion(self):
return torch.nn.MSELoss()
def __init__(self, n):
super().__init__()
self.layer = torch.nn.Linear(n, 1, bias=False)
self.layer.weight.data = torch.arange(n, dtype=torch.float32)
def forward(self, x):
return self.layer(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4}]
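# Minimal worked example (illustrative, not from the repo): the weight is pinned
# to arange(n), so the forward pass is a dot product with [0, 1, ..., n-1] along
# the last dimension, and that dimension is dropped from the output.
m = InnerProductModel(n=4)
x = torch.rand(4, 4, 4, 4)
w = torch.arange(4, dtype=torch.float32)
assert m(x).shape == (4, 4, 4)
assert torch.allclose(m(x), (x * w).sum(dim=-1))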
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x0, tmp18, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_mv_0[grid(64)](primals_2, primals_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
return reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), primals_2
class InnerProductModelNew(torch.nn.Module):
@staticmethod
def is_valid_model_type(model_type):
raise NotImplementedError
@staticmethod
def get_model_from_type(model_type):
raise NotImplementedError
@property
def loss_criterion(self):
return torch.nn.MSELoss()
def __init__(self, n):
super().__init__()
self.layer = torch.nn.Linear(n, 1, bias=False)
self.layer.weight.data = torch.arange(n, dtype=torch.float32)
def forward(self, input_0):
primals_1 = self.layer.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
SamuelGong/plato
|
InnerProductModel
| false
| 2,813
|
[
"Apache-2.0"
] | 0
|
726f965620e63dfe18cc2edf07cc010a751f0231
|
https://github.com/SamuelGong/plato/tree/726f965620e63dfe18cc2edf07cc010a751f0231
|
BCE_disc_sm_v8
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v8(nn.Module):
def __init__(self, lb_sm=0.2):
super(BCE_disc_sm_v8, self).__init__()
self.lb_sm = lb_sm
def forward(self, x, labels):
assert (x >= 0).all() and (x <= 1).all(), 'x is wrong'
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = torch.ones_like(labels) * self.lb_sm
loss = F.binary_cross_entropy(x, labels, reduction='none')
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
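# Worked example (illustrative, not from the repo): v8 discards the incoming
# labels entirely and targets the constant lb_sm, so the loss reduces to
# BCE(x, 0.2) averaged over dim 0.
crit = BCE_disc_sm_v8(lb_sm=0.2)
x = torch.rand(4, 4, 4, 4)
labels = torch.rand(4, 4, 4, 4)
ref = F.binary_cross_entropy(x, torch.full_like(x, 0.2), reduction='none').mean(dim=0)
assert torch.allclose(crit(x, labels), ref)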
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_mean_mul_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp12 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp32 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = -tmp0
tmp2 = libdevice.log1p(tmp1)
tmp3 = -100.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = -0.800000011920929
tmp6 = tmp5 * tmp4
tmp7 = tl_math.log(tmp0)
tmp8 = triton_helpers.maximum(tmp7, tmp3)
tmp9 = 0.20000000298023224
tmp10 = tmp9 * tmp8
tmp11 = tmp6 - tmp10
tmp13 = -tmp12
tmp14 = libdevice.log1p(tmp13)
tmp15 = triton_helpers.maximum(tmp14, tmp3)
tmp16 = tmp5 * tmp15
tmp17 = tl_math.log(tmp12)
tmp18 = triton_helpers.maximum(tmp17, tmp3)
tmp19 = tmp9 * tmp18
tmp20 = tmp16 - tmp19
tmp21 = tmp11 + tmp20
tmp23 = -tmp22
tmp24 = libdevice.log1p(tmp23)
tmp25 = triton_helpers.maximum(tmp24, tmp3)
tmp26 = tmp5 * tmp25
tmp27 = tl_math.log(tmp22)
tmp28 = triton_helpers.maximum(tmp27, tmp3)
tmp29 = tmp9 * tmp28
tmp30 = tmp26 - tmp29
tmp31 = tmp21 + tmp30
tmp33 = -tmp32
tmp34 = libdevice.log1p(tmp33)
tmp35 = triton_helpers.maximum(tmp34, tmp3)
tmp36 = tmp5 * tmp35
tmp37 = tl_math.log(tmp32)
tmp38 = triton_helpers.maximum(tmp37, tmp3)
tmp39 = tmp9 * tmp38
tmp40 = tmp36 - tmp39
tmp41 = tmp31 + tmp40
tmp42 = 4.0
tmp43 = tmp41 / tmp42
tl.store(out_ptr0 + x0, tmp43, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_mean_mul_0[grid(64)](arg1_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
return buf0,
class BCE_disc_sm_v8New(nn.Module):
def __init__(self, lb_sm=0.2):
super(BCE_disc_sm_v8New, self).__init__()
self.lb_sm = lb_sm
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v8
| false
| 2,814
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
|
InputConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _get_padding(kernel_size, stride, dilation):
padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
return padding
class InputConv(nn.Module):
def __init__(self, inp, outp, k=3, stride=1, dilation=1):
super(InputConv, self).__init__()
self.conv = nn.Conv2d(inp, outp, k, stride, padding=_get_padding(k,
stride, dilation), dilation=dilation)
def forward(self, x):
return F.relu6(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp': 4, 'outp': 4}]
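# Quick shape/range check (an illustrative assumption): with stride=1 the padding
# formula preserves spatial size, and relu6 clamps the activations to [0, 6].
conv = InputConv(inp=4, outp=4, k=3)
y = conv(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)
assert (y >= 0).all() and (y <= 6).all()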
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 <= tmp3
tmp8 = tmp2 >= tmp5
tmp9 = tmp7 | tmp8
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_hardtanh_hardtanh_backward_0[grid(256)](
buf0, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3, buf2
def _get_padding(kernel_size, stride, dilation):
padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
return padding
class InputConvNew(nn.Module):
def __init__(self, inp, outp, k=3, stride=1, dilation=1):
super(InputConvNew, self).__init__()
self.conv = nn.Conv2d(inp, outp, k, stride, padding=_get_padding(k,
stride, dilation), dilation=dilation)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sanjay-Ganeshan/webcam-mouse
|
InputConv
| false
| 2,815
|
[
"Apache-2.0"
] | 0
|
240d1ee00816440e971c8c747bef02c12f3e5d57
|
https://github.com/Sanjay-Ganeshan/webcam-mouse/tree/240d1ee00816440e971c8c747bef02c12f3e5d57
|
BCE_disc_sm_v3
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v3(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v3, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, x, labels):
assert (x >= 0).all() and (x <= 1).all(), 'x is wrong'
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = (1 - self.lb_sm) * labels
loss = F.binary_cross_entropy(x, labels, weight=self.weight_list,
reduction='none')
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
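# Worked example (illustrative, not from the repo): v3 shrinks the targets toward
# zero, labels -> (1 - lb_sm) * labels, then takes elementwise BCE and averages
# over dim 0.
crit = BCE_disc_sm_v3(lb_sm=0.2)
x = torch.rand(4, 4, 4, 4)
labels = torch.rand(4, 4, 4, 4)
ref = F.binary_cross_entropy(x, (1 - 0.2) * labels, reduction='none').mean(dim=0)
assert torch.allclose(crit(x, labels), ref)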
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_mean_mul_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp15 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp18 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp28 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp31 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp41 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp44 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 0.8
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 - tmp3
tmp6 = -tmp5
tmp7 = libdevice.log1p(tmp6)
tmp8 = -100.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp4 * tmp9
tmp11 = tl_math.log(tmp5)
tmp12 = triton_helpers.maximum(tmp11, tmp8)
tmp13 = tmp2 * tmp12
tmp14 = tmp10 - tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp16 - tmp3
tmp19 = -tmp18
tmp20 = libdevice.log1p(tmp19)
tmp21 = triton_helpers.maximum(tmp20, tmp8)
tmp22 = tmp17 * tmp21
tmp23 = tl_math.log(tmp18)
tmp24 = triton_helpers.maximum(tmp23, tmp8)
tmp25 = tmp16 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = tmp14 + tmp26
tmp29 = tmp28 * tmp1
tmp30 = tmp29 - tmp3
tmp32 = -tmp31
tmp33 = libdevice.log1p(tmp32)
tmp34 = triton_helpers.maximum(tmp33, tmp8)
tmp35 = tmp30 * tmp34
tmp36 = tl_math.log(tmp31)
tmp37 = triton_helpers.maximum(tmp36, tmp8)
tmp38 = tmp29 * tmp37
tmp39 = tmp35 - tmp38
tmp40 = tmp27 + tmp39
tmp42 = tmp41 * tmp1
tmp43 = tmp42 - tmp3
tmp45 = -tmp44
tmp46 = libdevice.log1p(tmp45)
tmp47 = triton_helpers.maximum(tmp46, tmp8)
tmp48 = tmp43 * tmp47
tmp49 = tl_math.log(tmp44)
tmp50 = triton_helpers.maximum(tmp49, tmp8)
tmp51 = tmp42 * tmp50
tmp52 = tmp48 - tmp51
tmp53 = tmp40 + tmp52
tmp54 = 4.0
tmp55 = tmp53 / tmp54
tl.store(out_ptr0 + x0, tmp55, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_mean_mul_0[grid(64)](arg0_1,
arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BCE_disc_sm_v3New(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v3New, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v3
| false
| 2,816
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
|
ResidualSelfAttention0
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"""Take in model size and number of heads."""
super(MultiHeadedAttention, self).__init__()
self.d_k = d_model // h
self.h = h
self.W_q = nn.Linear(d_model, self.h * self.d_k)
self.W_k = nn.Linear(d_model, self.h * self.d_k)
self.W_v = nn.Linear(d_model, self.h * self.d_k)
self.W_o = nn.Linear(self.h * self.d_k, d_model, bias=False)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def __attention(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0,
-1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
assert key.dim() == 3 and query.dim() == 3 and value.dim() == 3
batch_size = query.size(0)
query = self.W_q(query).view(batch_size, -1, self.h, self.d_k)
key = self.W_k(key).view(batch_size, -1, self.h, self.d_k)
value = self.W_v(value).view(batch_size, -1, self.h, self.d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
x, self.attn = self.__attention(query, key, value, dropout=self.
dropout, mask=mask)
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h *
self.d_k)
x = self.W_o(x)
return x
class ResidualSelfAttention0(nn.Module):
"""
Residual connection and layer norm with self attention
Permutation EQUIvariant
"""
def __init__(self, heads, d_model, dropout=0.0):
super(ResidualSelfAttention0, self).__init__()
self.size = d_model
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.sublayer = lambda x: self.attn(x, x, x)
    def forward(self, x):
        """Apply residual connection to any sublayer with the _same size_."""
        assert x.dim() == 3
        return x + self.norm(self.dropout(self.sublayer(x)))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'd_model': 4}]
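# Equivariance sketch (illustrative, not from the repo): permuting the sequence
# dimension of the input permutes the output the same way, which is what the
# docstring's permutation-equivariance claim means in practice.
block = ResidualSelfAttention0(heads=4, d_model=4)
block.eval()  # dropout off, so the comparison is deterministic
x = torch.rand(4, 4, 4)
perm = torch.randperm(4)
assert torch.allclose(block(x[:, perm, :]), block(x)[:, perm, :], atol=1e-5)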
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tmp0 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_4[grid(16)](buf11, buf12, buf13,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(64)](primals_1, buf11,
buf12, buf13, primals_9, primals_10, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_10
return buf14, buf7, primals_9, reinterpret_tensor(primals_1, (16, 4), (
4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), buf11, primals_8, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"""Take in model size and number of heads."""
super(MultiHeadedAttention, self).__init__()
self.d_k = d_model // h
self.h = h
self.W_q = nn.Linear(d_model, self.h * self.d_k)
self.W_k = nn.Linear(d_model, self.h * self.d_k)
self.W_v = nn.Linear(d_model, self.h * self.d_k)
self.W_o = nn.Linear(self.h * self.d_k, d_model, bias=False)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def __attention(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0,
-1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
assert key.dim() == 3 and query.dim() == 3 and value.dim() == 3
batch_size = query.size(0)
query = self.W_q(query).view(batch_size, -1, self.h, self.d_k)
key = self.W_k(key).view(batch_size, -1, self.h, self.d_k)
value = self.W_v(value).view(batch_size, -1, self.h, self.d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
x, self.attn = self.__attention(query, key, value, dropout=self.
dropout, mask=mask)
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h *
self.d_k)
x = self.W_o(x)
return x
class ResidualSelfAttention0New(nn.Module):
"""
Residual connection and layer norm with self attention
Permutation EQUIvariant
"""
def __init__(self, heads, d_model, dropout=0.0):
super(ResidualSelfAttention0New, self).__init__()
self.size = d_model
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.sublayer = lambda x: self.attn(x, x, x)
def forward(self, input_0):
primals_3 = self.norm.weight
primals_5 = self.norm.bias
primals_2 = self.attn.W_q.weight
primals_7 = self.attn.W_q.bias
primals_4 = self.attn.W_k.weight
primals_9 = self.attn.W_k.bias
primals_6 = self.attn.W_v.weight
primals_10 = self.attn.W_v.bias
primals_8 = self.attn.W_o.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
SSussexGit/deepikachu
|
ResidualSelfAttention0
| false
| 2,817
|
[
"MIT"
] | 0
|
72999c4a3f1767c3e5f332fe64cba9240ef43a79
|
https://github.com/SSussexGit/deepikachu/tree/72999c4a3f1767c3e5f332fe64cba9240ef43a79
|
MaxLayer
|
import torch
from torch import Tensor
import torch.nn
class MaxLayer(torch.nn.Module):
"""Placeholder Layer for Max operation"""
def __init__(self):
super(MaxLayer, self).__init__()
def forward(self, inputs: 'Tensor'):
return inputs.max(dim=-1)[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
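# Trivial usage sketch (illustrative): the layer is just a max over the last dim.
layer = MaxLayer()
x = torch.rand(4, 4, 4, 4)
assert torch.equal(layer(x), x.amax(dim=-1))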
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MaxLayerNew(torch.nn.Module):
"""Placeholder Layer for Max operation"""
def __init__(self):
super(MaxLayerNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
MoritzWag/LPDN
|
MaxLayer
| false
| 2,818
|
[
"MIT"
] | 0
|
a88a5a03f18c7f87879918369b8dc7a0e3abb02b
|
https://github.com/MoritzWag/LPDN/tree/a88a5a03f18c7f87879918369b8dc7a0e3abb02b
|
BCE_disc_sm_v5
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v5(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v5, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, x, labels):
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = labels + self.lb_sm * (1 - labels)
labels = labels / labels.sum(dim=1, keepdim=True)
loss = -F.log_softmax(x, dim=1) * labels
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
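# Worked example (illustrative, not from the repo): v5 smooths the labels toward
# one, renormalizes them over dim 1, and pairs them with a log-softmax cross
# entropy before the batch-dim mean.
crit = BCE_disc_sm_v5(lb_sm=0.2)
x = torch.rand(4, 4, 4, 4)
labels = torch.rand(4, 4, 4, 4)
t = labels + 0.2 * (1 - labels)
t = t / t.sum(dim=1, keepdim=True)
assert torch.allclose(crit(x, labels), (-F.log_softmax(x, dim=1) * t).mean(dim=0))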
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_div_mul_neg_rsub_sum_1(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + x3, xmask)
tmp21 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = -tmp13
tmp16 = 1.0
tmp17 = tmp16 - tmp15
tmp18 = 0.2
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = tmp16 - tmp21
tmp23 = tmp22 * tmp18
tmp24 = tmp21 + tmp23
tmp26 = tmp16 - tmp25
tmp27 = tmp26 * tmp18
tmp28 = tmp25 + tmp27
tmp29 = tmp24 + tmp28
tmp31 = tmp16 - tmp30
tmp32 = tmp31 * tmp18
tmp33 = tmp30 + tmp32
tmp34 = tmp29 + tmp33
tmp36 = tmp16 - tmp35
tmp37 = tmp36 * tmp18
tmp38 = tmp35 + tmp37
tmp39 = tmp34 + tmp38
tmp40 = tmp20 / tmp39
tmp41 = tmp14 * tmp40
tl.store(out_ptr0 + x3, tmp41, xmask)
@triton.jit
def triton_poi_fused_mean_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_add_div_mul_neg_rsub_sum_1[grid(256)](
buf0, arg0_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mean_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf2,
class BCE_disc_sm_v5New(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v5New, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v5
| false
| 2,819
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
|
SeperableConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _get_padding(kernel_size, stride, dilation):
padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
return padding
class SeperableConv(nn.Module):
def __init__(self, inp, outp, k=3, stride=1, dilation=1):
super(SeperableConv, self).__init__()
self.depthwise = nn.Conv2d(inp, inp, k, stride, padding=
_get_padding(k, stride, dilation), dilation=dilation, groups=inp)
self.pointwise = nn.Conv2d(inp, outp, 1, 1)
def forward(self, x):
x = F.relu6(self.depthwise(x))
x = F.relu6(self.pointwise(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp': 4, 'outp': 4}]
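# Side note as a sketch (illustrative, not from the repo): the depthwise +
# pointwise split uses far fewer weights than a dense conv with the same
# receptive field: 4*1*3*3 + 4*4*1*1 = 52 versus 4*4*3*3 = 144, ignoring biases.
sep = SeperableConv(inp=4, outp=4, k=3)
dense = nn.Conv2d(4, 4, 3, padding=1)
n_sep = sum(p.numel() for p in sep.parameters() if p.dim() > 1)
assert n_sep == 52 and dense.weight.numel() == 144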
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 <= tmp3
tmp8 = tmp2 >= tmp5
tmp9 = tmp7 | tmp8
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_hardtanh_hardtanh_backward_0[grid(256)](
buf0, primals_2, buf1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf0
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_hardtanh_hardtanh_backward_0[grid(256)](
buf2, primals_5, buf3, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4, buf5
def _get_padding(kernel_size, stride, dilation):
padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
return padding
class SeperableConvNew(nn.Module):
def __init__(self, inp, outp, k=3, stride=1, dilation=1):
super(SeperableConvNew, self).__init__()
self.depthwise = nn.Conv2d(inp, inp, k, stride, padding=
_get_padding(k, stride, dilation), dilation=dilation, groups=inp)
self.pointwise = nn.Conv2d(inp, outp, 1, 1)
def forward(self, input_0):
primals_1 = self.depthwise.weight
primals_2 = self.depthwise.bias
primals_4 = self.pointwise.weight
primals_5 = self.pointwise.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Sanjay-Ganeshan/webcam-mouse
|
SeperableConv
| false
| 2,820
|
[
"Apache-2.0"
] | 0
|
240d1ee00816440e971c8c747bef02c12f3e5d57
|
https://github.com/Sanjay-Ganeshan/webcam-mouse/tree/240d1ee00816440e971c8c747bef02c12f3e5d57
|
CustomInverse
|
import torch
class CustomTorchOp(torch.autograd.Function):
@staticmethod
def symbolic(g, input):
return g.op('torchcustom::Add10', input)
@staticmethod
def forward(ctx, x):
return x + 10
class CustomInverse(torch.nn.Module):
def forward(self, x, y):
ress = CustomTorchOp.apply(torch.inverse(x))
return ress, torch.all(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
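# Usage sketch (illustrative, not from the repo): the custom op adds 10 on top of
# a batched matrix inverse, and the second output reduces y with torch.all. The
# eye() shift below is only there to keep the 4x4 blocks strictly diagonally
# dominant, hence invertible.
mod = CustomInverse()
x = torch.rand(4, 4, 4, 4) + 4 * torch.eye(4)
res, all_y = mod(x, torch.rand(4, 4, 4, 4))
assert torch.allclose(res, torch.inverse(x) + 10, atol=1e-4)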
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 10.0
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_all_1(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.any(tmp3, 0))
tmp6 = tmp5 == 0
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.linalg_inv_ex.default(arg0_1)
del arg0_1
buf1 = buf0[0]
del buf0
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf3, 256, XBLOCK=128, num_warps=
4, num_stages=1)
buf4 = empty_strided_cuda((), (), torch.bool)
buf5 = buf4
del buf4
triton_per_fused_all_1[grid(1)](buf5, arg1_1, 1, 256, num_warps=2,
num_stages=1)
del arg1_1
return buf3, buf5
class CustomTorchOp(torch.autograd.Function):
@staticmethod
def symbolic(g, input):
return g.op('torchcustom::Add10', input)
@staticmethod
def forward(ctx, x):
return x + 10
class CustomInverseNew(torch.nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
Sanster/onnxruntime-extensions
|
CustomInverse
| false
| 2,821
|
[
"MIT"
] | 0
|
6eb41afcb2394d94ee90c7ae409fa96122e4cace
|
https://github.com/Sanster/onnxruntime-extensions/tree/6eb41afcb2394d94ee90c7ae409fa96122e4cace
|
DQN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
def __init__(self, state_dim, action_dim, fc1_unit=64, fc2_unit=64,
fc3_unit=128):
super(DQN, self).__init__()
self.fc1 = nn.Linear(state_dim, fc1_unit)
self.fc2 = nn.Linear(fc1_unit, fc2_unit)
self.A_fc1 = nn.Linear(fc2_unit, fc3_unit)
self.V_fc1 = nn.Linear(fc2_unit, fc3_unit)
self.A_fc2 = nn.Linear(fc3_unit, action_dim)
self.V_fc2 = nn.Linear(fc3_unit, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
A = F.relu(self.A_fc1(x))
V = F.relu(self.V_fc1(x))
A = self.A_fc2(A)
V = self.V_fc2(V)
Q = V + (A - A.mean())
return Q
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
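# Usage sketch with a hedged observation (not from the repo): the dueling head
# centers the advantage with a *global* A.mean() (the mean over every element,
# matching the 256.0 divisor in the fused kernel below) rather than the
# per-sample mean over actions used in some dueling-DQN formulations.
net = DQN(state_dim=4, action_dim=4)
q = net(torch.rand(4, 4, 4, 4))  # shape [4, 4, 4, 4]; V broadcasts over the last dim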
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_per_fused_add_mean_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp7 = tmp4 + tmp6
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp7 + tmp10
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp11, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64), (64, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 64), (64, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (4, 128), (128, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (1, 128), (128, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf15, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_6, (64, 128), (1, 64), 0), out=buf4)
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_8, (64, 128), (1, 64), 0), out=buf5)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf4
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf6,
primals_7, buf13, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_10, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf7)
del primals_11
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf12 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf8,
primals_9, buf12, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_12, (128, 1), (1, 128), 0), out=buf9)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_mean_sub_2[grid(1)](buf7, buf9, primals_13,
buf11, 1, 256, num_warps=2, num_stages=1)
del buf7
del buf9
del primals_13
return (buf11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 64), (64, 1), 0), reinterpret_tensor(buf6, (64, 128), (
128, 1), 0), reinterpret_tensor(buf8, (64, 128), (128, 1), 0),
primals_12, primals_10, buf12, primals_8, buf13, primals_6, buf14,
primals_4, buf15)
class DQNNew(nn.Module):
def __init__(self, state_dim, action_dim, fc1_unit=64, fc2_unit=64,
fc3_unit=128):
super(DQNNew, self).__init__()
self.fc1 = nn.Linear(state_dim, fc1_unit)
self.fc2 = nn.Linear(fc1_unit, fc2_unit)
self.A_fc1 = nn.Linear(fc2_unit, fc3_unit)
self.V_fc1 = nn.Linear(fc2_unit, fc3_unit)
self.A_fc2 = nn.Linear(fc3_unit, action_dim)
self.V_fc2 = nn.Linear(fc3_unit, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.A_fc1.weight
primals_7 = self.A_fc1.bias
primals_8 = self.V_fc1.weight
primals_9 = self.V_fc1.bias
primals_10 = self.A_fc2.weight
primals_11 = self.A_fc2.bias
primals_12 = self.V_fc2.weight
primals_13 = self.V_fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
ProfessorQu/Reinforcement-Learning
|
DQN
| false
| 2,822
|
[
"MIT"
] | 0
|
e1cd645fc5a7ce60248c1a96c560a38d1b9433cd
|
https://github.com/ProfessorQu/Reinforcement-Learning/tree/e1cd645fc5a7ce60248c1a96c560a38d1b9433cd
|
ScaledDotProductAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'temperature': 4}]
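# Minimal usage sketch (illustrative, not from the repo): with temperature=4 the
# logits are scaled by 1/4 before the softmax; each attention row sums to 1 and
# log_attn is the matching log-softmax.
sdpa = ScaledDotProductAttention(temperature=4)
sdpa.eval()  # dropout off, so attn and log_attn.exp() agree below
q = k = v = torch.rand(4, 4, 4)
out, attn, log_attn = sdpa(q, k, v)
assert torch.allclose(attn.sum(dim=2), torch.ones(4, 4), atol=1e-6)
assert torch.allclose(log_attn.exp(), attn, atol=1e-6)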
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf4
return buf3, buf2, buf5
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
    def forward(self, input_0, input_1, input_2):
        # call() consumes its second argument as q (left bmm operand) and its
        # first as k (transposed inside), so bind q and k accordingly
        arg1_1 = input_0
        arg0_1 = input_1
        arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2]
|
RuaBQ/FEAT
|
ScaledDotProductAttention
| false
| 2,823
|
[
"MIT"
] | 0
|
e46f56b03f8ef820d549cb385600a12bdf224de9
|
https://github.com/RuaBQ/FEAT/tree/e46f56b03f8ef820d549cb385600a12bdf224de9
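A minimal eager-mode sketch (shapes chosen for illustration, classes above in scope): temperature divides the raw q @ k^T scores before the softmax, and eval() turns the dropout into a no-op so the call is deterministic.
import torch
attn = ScaledDotProductAttention(temperature=4).eval()
q, k, v = (torch.rand(4, 4, 4) for _ in range(3))
out, weights, log_weights = attn(q, k, v)
print(out.shape)                                               # torch.Size([4, 4, 4])
print(torch.allclose(weights.log(), log_weights, atol=1e-06))  # True: log_attn is the log-softmax of the same scores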
|
MSE_cont
|
import torch
import torch.nn as nn
class MSE_cont(nn.Module):
def __init__(self, theta=1 / 10):
super(MSE_cont, self).__init__()
self.theta = theta
def forward(self, x, labels):
loss = (x - labels) ** 2
mask = loss.gt(self.theta).float()
loss = loss * mask
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_mean_mul_pow_sub_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp16 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp17 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp24 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp25 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 0.1
tmp5 = tmp3 > tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp11 > tmp4
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 * tmp13
tmp15 = tmp7 + tmp14
tmp18 = tmp16 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tmp19 > tmp4
tmp21 = tmp20.to(tl.float32)
tmp22 = tmp19 * tmp21
tmp23 = tmp15 + tmp22
tmp26 = tmp24 - tmp25
tmp27 = tmp26 * tmp26
tmp28 = tmp27 > tmp4
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp27 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.store(out_ptr0 + x0, tmp33, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_mean_mul_pow_sub_0[grid(64)](arg0_1,
arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MSE_contNew(nn.Module):
def __init__(self, theta=1 / 10):
super(MSE_contNew, self).__init__()
self.theta = theta
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Sampson-Lee/SIB-Net
|
MSE_cont
| false
| 2,824
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
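A minimal usage sketch (CPU is fine; only the eager module is exercised): squared errors at or below theta are zeroed out, and the result is averaged over the batch dimension only.
import torch
loss_fn = MSE_cont(theta=0.1)
x, labels = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = loss_fn(x, labels)  # squared error, zeroed where <= theta, mean over dim 0
print(loss.shape)          # torch.Size([4, 4, 4])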
|
multiloss
|
import torch
import torch.nn as nn
class multiloss(nn.Module):
def __init__(self, objective_num):
super(multiloss, self).__init__()
self.objective_num = objective_num
self.log_var = nn.Parameter(torch.zeros(self.objective_num))
def forward(self, losses):
for i in range(len(losses)):
precision = torch.exp(-self.log_var[i])
if i == 0:
loss = precision * losses[i] + self.log_var[i]
else:
loss += precision * losses[i] + self.log_var[i]
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'objective_num': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_mul_neg_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr0 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp11 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp19 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp23 = tl.load(in_ptr0 + 3)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp27 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = -tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 * tmp4
tmp6 = tmp5 + tmp1
tmp9 = -tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 * tmp11
tmp13 = tmp12 + tmp8
tmp14 = tmp6 + tmp13
tmp17 = -tmp16
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp18 * tmp19
tmp21 = tmp20 + tmp16
tmp22 = tmp14 + tmp21
tmp25 = -tmp24
tmp26 = tl_math.exp(tmp25)
tmp28 = tmp26 * tmp27
tmp29 = tmp28 + tmp24
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_mul_neg_0[grid(64)](primals_2, primals_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_2
class multilossNew(nn.Module):
def __init__(self, objective_num):
super(multilossNew, self).__init__()
self.objective_num = objective_num
self.log_var = nn.Parameter(torch.zeros(self.objective_num))
def forward(self, input_0):
primals_2 = self.log_var
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
ShaharLutatiPersonal/hyperhypernetworks
|
multiloss
| false
| 2,825
|
[
"MIT"
] | 0
|
16e2595d89ad0533c9d5a2c62870fb90f1b1dc42
|
https://github.com/ShaharLutatiPersonal/hyperhypernetworks/tree/16e2595d89ad0533c9d5a2c62870fb90f1b1dc42
|
ScaledDotProductAttention
|
import torch
import torch.optim.lr_scheduler
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_model, attention_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temper = d_model ** 0.5
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, attn_mask=None):
attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
if attn_mask is not None:
assert attn_mask.size() == attn.size(
), 'Attention mask shape {} mismatch with Attention logit tensor shape {}.'.format(
attn_mask.size(), attn.size())
attn.data.masked_fill_(attn_mask, -float('inf'))
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.optim.lr_scheduler
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
def __init__(self, d_model, attention_dropout=0.1):
super(ScaledDotProductAttentionNew, self).__init__()
self.temper = d_model ** 0.5
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=-1)
    def forward(self, input_0, input_1, input_2):
        # call() consumes its second argument as q (left bmm operand) and its
        # first as k (transposed inside), so bind q and k accordingly
        arg1_1 = input_0
        arg0_1 = input_1
        arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
Shengqiang-Zhang/self-attentive-parser
|
ScaledDotProductAttention
| false
| 2,826
|
[
"MIT"
] | 0
|
493f74c7acab9824d593f55d231754c5ac7cbb26
|
https://github.com/Shengqiang-Zhang/self-attentive-parser/tree/493f74c7acab9824d593f55d231754c5ac7cbb26
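A minimal eager-mode sketch; raw scores are divided by temper = sqrt(d_model) before the softmax. The compiled call() above covers only the attn_mask=None path and omits the dropout, so it corresponds to the eager module in eval mode with no mask.
import torch
attn = ScaledDotProductAttention(d_model=4).eval()
q = k = v = torch.rand(2, 5, 4)
out, weights = attn(q, k, v)
print(out.shape, weights.shape)  # torch.Size([2, 5, 4]) torch.Size([2, 5, 5])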
|
conv_block
|
import torch
import torch.nn as nn
class conv_block(nn.Module):
def __init__(self, init_shape):
super(conv_block, self).__init__()
self.conv0 = nn.Conv2d(in_channels=init_shape[0], out_channels=
init_shape[1], kernel_size=init_shape[2])
self.relu = nn.ELU()
nn.init.kaiming_uniform_(self.conv0.weight)
def forward(self, input):
out = self.relu(self.conv0(input))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'init_shape': [4, 4, 4]}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(16)](buf1, primals_2, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf1
class conv_blockNew(nn.Module):
def __init__(self, init_shape):
super(conv_blockNew, self).__init__()
self.conv0 = nn.Conv2d(in_channels=init_shape[0], out_channels=
init_shape[1], kernel_size=init_shape[2])
self.relu = nn.ELU()
nn.init.kaiming_uniform_(self.conv0.weight)
def forward(self, input_0):
primals_1 = self.conv0.weight
primals_2 = self.conv0.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ShaharLutatiPersonal/hyperhypernetworks
|
conv_block
| false
| 2,827
|
[
"MIT"
] | 0
|
16e2595d89ad0533c9d5a2c62870fb90f1b1dc42
|
https://github.com/ShaharLutatiPersonal/hyperhypernetworks/tree/16e2595d89ad0533c9d5a2c62870fb90f1b1dc42
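Despite the attribute name relu, the activation is ELU. A minimal usage sketch; init_shape is [in_channels, out_channels, kernel_size], and with no padding the spatial dims shrink by kernel_size - 1.
import torch
block = conv_block([3, 16, 3])
x = torch.rand(2, 3, 32, 32)
y = block(x)    # Conv2d(3, 16, 3) followed by ELU
print(y.shape)  # torch.Size([2, 16, 30, 30])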
|
FusedLeakyReLU
|
import torch
from torch import nn
from torch.nn import functional as F
import code  # needed by the code.interact fallback in forward
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
bias = self.bias[None, :, None, None]
try:
out = F.leaky_relu(input + bias, negative_slope=self.negative_slope
) * self.scale
except Exception:
code.interact('Something is wrong with bias', local={**globals(
), **locals()})
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 1.4142135623730951
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_0[grid(256)](primals_2,
primals_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class FusedLeakyReLUNew(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input_0):
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
ShinoharaHare/stylegan2-pytorch
|
FusedLeakyReLU
| false
| 2,828
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 0
|
5a4b1c4e9753681bc1694195f3b2391527c1b525
|
https://github.com/ShinoharaHare/stylegan2-pytorch/tree/5a4b1c4e9753681bc1694195f3b2391527c1b525
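A minimal sketch of the activation itself: out = leaky_relu(x + bias, 0.2) * sqrt(2), with a learnable per-channel bias initialized to zero, so at initialization the module is just a scaled leaky ReLU.
import torch
import torch.nn.functional as F
act = FusedLeakyReLU(channel=4)
x = torch.randn(2, 4, 8, 8)
y = act(x)
print(torch.allclose(y, F.leaky_relu(x, 0.2) * 2 ** 0.5))  # True while bias is zero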
|
BCE_disc_sm_v7
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v7(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v7, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, x, labels):
assert (x >= 0).all() and (x <= 1).all(), 'x is wrong'
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = labels / 3
loss = F.binary_cross_entropy(x, labels, weight=self.weight_list,
reduction='none')
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_div_mean_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp15 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp18 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp28 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp31 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp41 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp44 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 - tmp3
tmp6 = -tmp5
tmp7 = libdevice.log1p(tmp6)
tmp8 = -100.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp4 * tmp9
tmp11 = tl_math.log(tmp5)
tmp12 = triton_helpers.maximum(tmp11, tmp8)
tmp13 = tmp2 * tmp12
tmp14 = tmp10 - tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp16 - tmp3
tmp19 = -tmp18
tmp20 = libdevice.log1p(tmp19)
tmp21 = triton_helpers.maximum(tmp20, tmp8)
tmp22 = tmp17 * tmp21
tmp23 = tl_math.log(tmp18)
tmp24 = triton_helpers.maximum(tmp23, tmp8)
tmp25 = tmp16 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = tmp14 + tmp26
tmp29 = tmp28 * tmp1
tmp30 = tmp29 - tmp3
tmp32 = -tmp31
tmp33 = libdevice.log1p(tmp32)
tmp34 = triton_helpers.maximum(tmp33, tmp8)
tmp35 = tmp30 * tmp34
tmp36 = tl_math.log(tmp31)
tmp37 = triton_helpers.maximum(tmp36, tmp8)
tmp38 = tmp29 * tmp37
tmp39 = tmp35 - tmp38
tmp40 = tmp27 + tmp39
tmp42 = tmp41 * tmp1
tmp43 = tmp42 - tmp3
tmp45 = -tmp44
tmp46 = libdevice.log1p(tmp45)
tmp47 = triton_helpers.maximum(tmp46, tmp8)
tmp48 = tmp43 * tmp47
tmp49 = tl_math.log(tmp44)
tmp50 = triton_helpers.maximum(tmp49, tmp8)
tmp51 = tmp42 * tmp50
tmp52 = tmp48 - tmp51
tmp53 = tmp40 + tmp52
tmp54 = 4.0
tmp55 = tmp53 / tmp54
tl.store(out_ptr0 + x0, tmp55, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_div_mean_0[grid(64)](arg0_1,
arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BCE_disc_sm_v7New(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v7New, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
    def forward(self, input_0, input_1):
        # the kernel divides its first input by 3 (the labels) and takes the
        # log of its second (the predictions), so bind them accordingly
        arg0_1 = input_1
        arg1_1 = input_0
        output = call([arg0_1, arg1_1])
return output[0]
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v7
| false
| 2,829
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
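Note that lb_sm is stored but never used in this variant: the targets are hard-divided by 3 instead. A minimal sketch (both inputs must lie in [0, 1] or the asserts fire):
import torch
crit = BCE_disc_sm_v7()
x, labels = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = crit(x, labels)  # elementwise BCE(x, labels / 3), then mean over the batch dim
print(loss.shape)       # torch.Size([4, 4, 4])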
|
Down2d
|
import torch
import torch.nn as nn
class Down2d(nn.Module):
"""docstring for Down2d."""
def __init__(self, in_channel, out_channel, kernel, stride, padding):
super(Down2d, self).__init__()
self.c1 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n1 = nn.InstanceNorm2d(out_channel)
self.c2 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n2 = nn.InstanceNorm2d(out_channel)
def forward(self, x):
x1 = self.c1(x)
x1 = self.n1(x1)
x2 = self.c2(x)
x2 = self.n2(x2)
x3 = x1 * torch.sigmoid(x2)
return x3
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel': 4, 'stride':
1, 'padding': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_mul_sigmoid_0(
in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 81
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 81 * x3), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_out_ptr2 + (r2 + 81 * x3), rmask & xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 81, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(rmask & xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 81.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp26 = tmp24 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp27, 0)
tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp32 = tl.where(rmask & xmask, tmp30, 0)
tmp33 = tl.sum(tmp32, 1)[:, None]
tmp34 = tmp33 / tmp11
tmp35 = tmp27 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.where(rmask & xmask, tmp37, 0)
tmp40 = tl.sum(tmp39, 1)[:, None]
tmp41 = tmp40 / tmp19
tmp42 = tmp41 + tmp21
tmp43 = libdevice.rsqrt(tmp42)
tmp44 = tmp2 - tmp12
tmp45 = tmp44 * tmp23
tmp46 = tmp26 - tmp34
tmp47 = tmp46 * tmp43
tmp48 = tl.sigmoid(tmp47)
tmp49 = tmp45 * tmp48
tl.store(in_out_ptr0 + (r2 + 81 * x3), tmp2, rmask & xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(in_out_ptr2 + (r2 + 81 * x3), tmp26, rmask & xmask)
tl.debug_barrier()
tl.store(in_out_ptr3 + x3, tmp43, xmask)
tl.store(out_ptr2 + (r2 + 81 * x3), tmp49, rmask & xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp34, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf6 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf11 = reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf9
buf12 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32
)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_mul_sigmoid_0[
grid(16)](buf1, buf5, buf7, buf11, primals_2, primals_5, buf2,
buf8, buf12, 16, 81, XBLOCK=1, num_warps=2, num_stages=1)
del primals_2
del primals_5
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf5, buf7,
buf8, buf11)
class Down2dNew(nn.Module):
"""docstring for Down2d."""
def __init__(self, in_channel, out_channel, kernel, stride, padding):
super(Down2dNew, self).__init__()
self.c1 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n1 = nn.InstanceNorm2d(out_channel)
self.c2 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n2 = nn.InstanceNorm2d(out_channel)
    def forward(self, input_0):
        primals_1 = self.c1.weight
        primals_2 = self.c1.bias
        # call() uses its third argument as the shared conv input and its
        # fourth as the c2 weight, so bind the input third
        primals_3 = input_0
        primals_4 = self.c2.weight
        primals_5 = self.c2.bias
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Shimamura-Lab-SU/SGV
|
Down2d
| false
| 2,830
|
[
"MIT"
] | 0
|
8df3c314532528b8597c5dbb28bdfb23155bee82
|
https://github.com/Shimamura-Lab-SU/SGV/tree/8df3c314532528b8597c5dbb28bdfb23155bee82
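A minimal sketch with an illustrative stride-2 configuration (the get_init_inputs above uses stride 1): the block gates one instance-normalized conv with the sigmoid of a parallel one.
import torch
down = Down2d(in_channel=4, out_channel=8, kernel=3, stride=2, padding=1)
x = torch.rand(2, 4, 16, 16)
y = down(x)     # IN(c1(x)) * sigmoid(IN(c2(x)))
print(y.shape)  # torch.Size([2, 8, 8, 8])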
|
GCN
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from itertools import *
from time import *
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=False):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.matmul(input, self.weight)
output = torch.matmul(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
from itertools import *
from time import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_view_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
buf2 = buf0
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_view_0[grid(256)](buf1,
buf2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0),
out=buf4)
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), buf5, reinterpret_tensor(primals_2,
(4, 64), (1, 4), 0)
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=False):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.matmul(input, self.weight)
output = torch.matmul(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc2.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Richard-LYF/SESS-GC
|
GCN
| false
| 2,831
|
[
"MIT"
] | 0
|
2280e5ec8e5c5e20d0bda629b7d05f61bad0bec7
|
https://github.com/Richard-LYF/SESS-GC/tree/2280e5ec8e5c5e20d0bda629b7d05f61bad0bec7
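A minimal sketch on a toy graph (classes above in scope): adj is expected to be a normalized adjacency matrix, each layer computes adj @ (x @ W), and eval() disables the dropout.
import torch
model = GCN(nfeat=8, nhid=16, nclass=3, dropout=0.5).eval()
x = torch.rand(10, 8)  # 10 nodes, 8 features each
adj = torch.eye(10)    # identity adjacency, purely for illustration
out = model(x, adj)
print(out.shape)       # torch.Size([10, 3])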
|
EPE
|
import torch
import torch.nn as nn
class EPE(nn.Module):
def __init__(self):
super(EPE, self).__init__()
def forward(self, flow, gt, loss_mask):
loss_map = (flow - gt.detach()) ** 2
loss_map = (loss_map.sum(1, True) + 1e-06) ** 0.5
return loss_map * loss_mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1e-06
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sub_sum_0[grid(64)](arg1_1, arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_sub_sum_1[grid(256)](buf0, arg2_1,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del buf0
return buf1,
class EPENew(nn.Module):
def __init__(self):
super(EPENew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Shreyamkmr/Frame-Interpolation
|
EPE
| false
| 2,832
|
[
"MIT"
] | 0
|
bf5eb768e11fdd55d3f322f0a365db3b190a7903
|
https://github.com/Shreyamkmr/Frame-Interpolation/tree/bf5eb768e11fdd55d3f322f0a365db3b190a7903
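This is the per-pixel endpoint error: the channelwise L2 norm of the flow residual, with a 1e-06 stabilizer inside the square root, then masked. A minimal sketch with 2-channel flow:
import torch
epe = EPE()
flow, gt = torch.rand(4, 2, 8, 8), torch.rand(4, 2, 8, 8)
mask = torch.ones(4, 1, 8, 8)
err = epe(flow, gt, mask)  # ((flow - gt) ** 2).sum(1, True).add(1e-06).sqrt() * mask
print(err.shape)           # torch.Size([4, 1, 8, 8])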
|
BCE_disc_sm_v4
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BCE_disc_sm_v4(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v4, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
def forward(self, x, labels):
assert (x >= 0).all() and (x <= 1).all(), 'x is wrong'
assert (labels >= 0).all() and (labels <= 1).all(), 'labels is wrong'
labels = labels + self.lb_sm * (1 - labels)
loss = F.binary_cross_entropy(x, labels, weight=self.weight_list,
reduction='none')
return loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_binary_cross_entropy_mean_mul_rsub_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask)
tmp17 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp22 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp32 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp37 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp47 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp52 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = tmp5 - tmp1
tmp8 = -tmp7
tmp9 = libdevice.log1p(tmp8)
tmp10 = -100.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tmp6 * tmp11
tmp13 = tl_math.log(tmp7)
tmp14 = triton_helpers.maximum(tmp13, tmp10)
tmp15 = tmp5 * tmp14
tmp16 = tmp12 - tmp15
tmp18 = tmp1 - tmp17
tmp19 = tmp18 * tmp3
tmp20 = tmp17 + tmp19
tmp21 = tmp20 - tmp1
tmp23 = -tmp22
tmp24 = libdevice.log1p(tmp23)
tmp25 = triton_helpers.maximum(tmp24, tmp10)
tmp26 = tmp21 * tmp25
tmp27 = tl_math.log(tmp22)
tmp28 = triton_helpers.maximum(tmp27, tmp10)
tmp29 = tmp20 * tmp28
tmp30 = tmp26 - tmp29
tmp31 = tmp16 + tmp30
tmp33 = tmp1 - tmp32
tmp34 = tmp33 * tmp3
tmp35 = tmp32 + tmp34
tmp36 = tmp35 - tmp1
tmp38 = -tmp37
tmp39 = libdevice.log1p(tmp38)
tmp40 = triton_helpers.maximum(tmp39, tmp10)
tmp41 = tmp36 * tmp40
tmp42 = tl_math.log(tmp37)
tmp43 = triton_helpers.maximum(tmp42, tmp10)
tmp44 = tmp35 * tmp43
tmp45 = tmp41 - tmp44
tmp46 = tmp31 + tmp45
tmp48 = tmp1 - tmp47
tmp49 = tmp48 * tmp3
tmp50 = tmp47 + tmp49
tmp51 = tmp50 - tmp1
tmp53 = -tmp52
tmp54 = libdevice.log1p(tmp53)
tmp55 = triton_helpers.maximum(tmp54, tmp10)
tmp56 = tmp51 * tmp55
tmp57 = tl_math.log(tmp52)
tmp58 = triton_helpers.maximum(tmp57, tmp10)
tmp59 = tmp50 * tmp58
tmp60 = tmp56 - tmp59
tmp61 = tmp46 + tmp60
tmp62 = 4.0
tmp63 = tmp61 / tmp62
tl.store(out_ptr0 + x0, tmp63, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_binary_cross_entropy_mean_mul_rsub_0[grid(64)](
arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BCE_disc_sm_v4New(nn.Module):
def __init__(self, weight_list=None, lb_sm=0.2):
super(BCE_disc_sm_v4New, self).__init__()
self.weight_list = weight_list
self.lb_sm = lb_sm
    def forward(self, input_0, input_1):
        # the kernel applies the lb_sm smoothing to its first input (the
        # labels) and takes the log of its second (the predictions)
        arg0_1 = input_1
        arg1_1 = input_0
        output = call([arg0_1, arg1_1])
return output[0]
|
Sampson-Lee/SIB-Net
|
BCE_disc_sm_v4
| false
| 2,833
|
[
"MIT"
] | 0
|
650399082e9237327fa38168ccfc7d48153a1db5
|
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
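Unlike the v7 variant above, this one actually uses lb_sm: targets are smoothed toward 1 via labels + lb_sm * (1 - labels). A minimal sketch:
import torch
crit = BCE_disc_sm_v4(lb_sm=0.2)
x = torch.rand(4, 4, 4, 4)
labels = torch.zeros(4, 4, 4, 4)
loss = crit(x, labels)  # BCE against a uniform 0.2 target, since 0 + 0.2 * (1 - 0) = 0.2
print(loss.shape)       # torch.Size([4, 4, 4])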
|
HingeLoss
|
import torch
import torch.nn as nn
class HingeLoss(nn.Module):
def __init__(self):
super(HingeLoss, self).__init__()
self.margin = 1.0
def hinge_loss(self, input, target):
output = self.margin - input.mul(target)
output[output.le(0)] = 0
return output.mean()
def forward(self, input, target):
return self.hinge_loss(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = tl.where(tmp6, tmp5, tmp4)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class HingeLossNew(nn.Module):
def __init__(self):
super(HingeLossNew, self).__init__()
self.margin = 1.0
def hinge_loss(self, input, target):
output = self.margin - input.mul(target)
output[output.le(0)] = 0
return output.mean()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Siraj-Qazi/BNN-PYNQ
|
HingeLoss
| false
| 2,834
|
[
"BSD-3-Clause"
] | 0
|
b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
|
https://github.com/Siraj-Qazi/BNN-PYNQ/tree/b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
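A minimal sketch; targets are expected in {-1, +1}, and the loss is mean(max(0, 1 - score * target)).
import torch
crit = HingeLoss()
scores = torch.randn(8)
targets = torch.randint(0, 2, (8,)).float() * 2 - 1  # {-1, +1}
print(crit(scores, targets))  # non-negative scalar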
|
GradLoss
|
import torch
import torch.nn as nn
class GradLoss(nn.Module):
def __init__(self):
super(GradLoss, self).__init__()
def forward(self, grad_fake, grad_real):
return torch.mean(torch.abs(grad_real - grad_fake))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class GradLossNew(nn.Module):
def __init__(self):
super(GradLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
SiTae9317/Depth-Estimation-PyTorch
|
GradLoss
| false
| 2,836
|
[
"MIT"
] | 0
|
03b25d5cd2dff665c4435e72aba605a9d710fe01
|
https://github.com/SiTae9317/Depth-Estimation-PyTorch/tree/03b25d5cd2dff665c4435e72aba605a9d710fe01
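Despite the name, this is a plain L1 distance between the two (gradient) tensors:
import torch
crit = GradLoss()
a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(crit(a, b))  # == (a - b).abs().mean()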
|
Quantizer
|
import torch
import torch.nn as nn
class QuantizeAct(torch.autograd.Function):
@staticmethod
def forward(ctx, input, numbits):
ctx.save_for_backward(input)
if numbits == 1:
return input.sign()
elif numbits == 2:
return torch.floor(input + 0.5)
else:
return torch.floor(input.add(1).div(2).clamp_(0, 0.999).mul(2 **
numbits - 1)).sub((2 ** numbits - 1) // 2)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None
class Quantizer(nn.Module):
def __init__(self, numbits):
super(Quantizer, self).__init__()
self.numbits = numbits
def forward(self, input):
return QuantizeAct.apply(input, self.numbits)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'numbits': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_floor_mul_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.999
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = 15.0
tmp10 = tmp8 * tmp9
tmp11 = libdevice.floor(tmp10)
tmp12 = 7.0
tmp13 = tmp11 - tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_div_floor_mul_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class QuantizeAct(torch.autograd.Function):
@staticmethod
def forward(ctx, input, numbits):
ctx.save_for_backward(input)
if numbits == 1:
return input.sign()
elif numbits == 2:
return torch.floor(input + 0.5)
else:
return torch.floor(input.add(1).div(2).clamp_(0, 0.999).mul(2 **
numbits - 1)).sub((2 ** numbits - 1) // 2)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None
class QuantizerNew(nn.Module):
def __init__(self, numbits):
super(QuantizerNew, self).__init__()
self.numbits = numbits
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Siraj-Qazi/BNN-PYNQ
|
Quantizer
| false
| 2,837
|
[
"BSD-3-Clause"
] | 0
|
b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
|
https://github.com/Siraj-Qazi/BNN-PYNQ/tree/b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
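For numbits >= 3, inputs in [-1, 1) are mapped onto 2 ** numbits - 1 uniform integer levels centered at zero, and the custom backward is a straight-through estimator (gradients pass unchanged). A minimal sketch:
import torch
q = Quantizer(numbits=4)
x = torch.linspace(-1.0, 1.0, 5, requires_grad=True)
y = q(x)
print(y)       # tensor([-7., -4., 0., 4., 7.], ...): 15 levels in {-7, ..., 7}
y.sum().backward()
print(x.grad)  # all ones: the straight-through estimator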
|
h_sigmoid
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max / 6
def forward(self, x):
return self.relu(x + 3) * self.h_max
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoidNew(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoidNew, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max / 6
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SpectrePrediction/micronet
|
h_sigmoid
| false
| 2,838
|
[
"MIT"
] | 0
|
f56269c7a8744f750e9870f0baa9fb6e68f27b9c
|
https://github.com/SpectrePrediction/micronet/tree/f56269c7a8744f750e9870f0baa9fb6e68f27b9c
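This is the standard hard sigmoid: relu6(x + 3) * h_max / 6, a piecewise-linear surrogate that is exactly 0 at or below -3 and h_max at or above +3. A minimal sketch:
import torch
hs = h_sigmoid()
x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
print(hs(x))  # tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])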
|
StyledConv
|
import math
import torch
from torch import nn
from torch.nn import functional as F
import code  # needed by the code.interact fallback in FusedLeakyReLU.forward
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
input = input.permute(0, 2, 3, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out[:, :, ::down_y, ::down_x]
return out
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
"""
out = UpFirDn2d.apply(
input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
)
"""
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return F.leaky_relu(input + bias, negative_slope=negative_slope) * scale
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
bias = self.bias[None, :, None, None]
try:
out = F.leaky_relu(input + bias, negative_slope=self.negative_slope
) * self.scale
        except Exception:
            import code  # the stdlib `code` module was never imported in this file
            code.interact('Something is wrong with bias', local={**globals(
                ), **locals()})
            raise  # re-raise so `return out` below cannot hit an unbound name
return out
class StyledConv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
out = self.activate(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4,
'style_dim': 4}]
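# Usage sketch (an illustrative addition, not part of the source entry): build
# the module from the helpers above and run one CPU forward pass. The expected
# shape is inferred from padding=kernel_size//2 with a 4x4 kernel, so treat it
# as an assumption.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    conv = StyledConv(*init_args, **init_kwargs)
    feat, style = get_inputs()
    out = conv(feat, style)
    print(out.shape)  # should be torch.Size([4, 4, 5, 5]) for these helpers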
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = rindex // 16
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp12, xmask)
tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 25
x2 = xindex // 100
x1 = xindex // 25 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = 1.4142135623730951
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5,
buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4,
4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32)
buf8 = torch.ops.aten.normal_functional.default(buf7)
del buf7
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32
)
triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6,
buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_6
del primals_7
return buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5,
(16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
input = input.permute(0, 2, 3, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out[:, :, ::down_y, ::down_x]
return out
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
"""
out = UpFirDn2d.apply(
input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
)
"""
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return F.leaky_relu(input + bias, negative_slope=negative_slope) * scale
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
bias = self.bias[None, :, None, None]
try:
out = F.leaky_relu(input + bias, negative_slope=self.negative_slope
) * self.scale
        except Exception:
            import code  # the stdlib `code` module was never imported in this file
            code.interact('Something is wrong with bias', local={**globals(
                ), **locals()})
            raise  # re-raise so `return out` below cannot hit an unbound name
return out
class StyledConvNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input_0, input_1):
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_6 = self.noise.weight
primals_7 = self.activate.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ShinoharaHare/stylegan2-pytorch
|
StyledConv
| false
| 2,839
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 0
|
5a4b1c4e9753681bc1694195f3b2391527c1b525
|
https://github.com/ShinoharaHare/stylegan2-pytorch/tree/5a4b1c4e9753681bc1694195f3b2391527c1b525
|
ConvFC
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvFC(nn.Module):
def __init__(self, conv_in_channels, conv_out_channels, input_size,
hidden_size, output_size, kernel_size=3):
super(ConvFC, self).__init__()
self.conv_out_channels = conv_out_channels
self.layer1 = nn.Conv2d(conv_in_channels, conv_out_channels,
kernel_size=kernel_size)
self.conv_result_size = input_size - kernel_size + 1
self.fc_size = self.conv_result_size ** 2 * self.conv_out_channels
self.layer2 = nn.Linear(self.fc_size, hidden_size)
self.layer3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
assert len(x.shape) >= 3
if len(x.shape) == 3:
x = x.unsqueeze(0)
conv_output = F.leaky_relu(self.layer1(x))
output = conv_output.reshape(-1, self.fc_size)
output = F.leaky_relu(self.layer2(output))
output = self.layer3(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'conv_in_channels': 4, 'conv_out_channels': 4,
'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
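# Usage sketch (illustrative addition, not from the source repo): with
# input_size=4 and kernel_size=3 the conv output is 2x2, so fc_size=16 and the
# flattened batch has 4 rows.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    net = ConvFC(*init_args, **init_kwargs)
    out = net(*get_inputs())
    print(out.shape)  # should be torch.Size([4, 4])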
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf0, primals_3,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (4, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(16)](buf3, primals_5, buf4, buf5,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = buf3
del buf3
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_7
return buf6, primals_1, primals_2, buf1, reinterpret_tensor(buf2, (4,
16), (16, 1), 0), buf4, buf5, primals_6, primals_4
class ConvFCNew(nn.Module):
def __init__(self, conv_in_channels, conv_out_channels, input_size,
hidden_size, output_size, kernel_size=3):
super(ConvFCNew, self).__init__()
self.conv_out_channels = conv_out_channels
self.layer1 = nn.Conv2d(conv_in_channels, conv_out_channels,
kernel_size=kernel_size)
self.conv_result_size = input_size - kernel_size + 1
self.fc_size = self.conv_result_size ** 2 * self.conv_out_channels
self.layer2 = nn.Linear(self.fc_size, hidden_size)
self.layer3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Silent-Zebra/sequential_social_dilemma_games
|
ConvFC
| false
| 2,840
|
[
"MIT"
] | 0
|
8cf8faebf7de727bac55bd8020be7cd9cc243ccc
|
https://github.com/Silent-Zebra/sequential_social_dilemma_games/tree/8cf8faebf7de727bac55bd8020be7cd9cc243ccc
|
h_swish
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max / 6
def forward(self, x):
return self.relu(x + 3) * self.h_max
class h_swish(nn.Module):
def __init__(self, inplace=True):
super(h_swish, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, x):
return x * self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
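# Usage sketch (illustrative addition): h_swish computes x * ReLU6(x + 3) / 6,
# so it preserves shape; the check below compares against that closed form.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    act = h_swish(*init_args, **init_kwargs)
    x = get_inputs()[0]
    y = act(x)
    ref = x * torch.clamp(x + 3, 0, 6) / 6
    print(y.shape, torch.allclose(y, ref))  # torch.Size([4, 4, 4, 4]) True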
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max / 6
def forward(self, x):
return self.relu(x + 3) * self.h_max
class h_swishNew(nn.Module):
def __init__(self, inplace=True):
super(h_swishNew, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SpectrePrediction/micronet
|
h_swish
| false
| 2,841
|
[
"MIT"
] | 0
|
f56269c7a8744f750e9870f0baa9fb6e68f27b9c
|
https://github.com/SpectrePrediction/micronet/tree/f56269c7a8744f750e9870f0baa9fb6e68f27b9c
|
NeuralNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(NeuralNet, self).__init__()
self.layer1 = nn.Linear(input_size, hidden_size)
self.layer2 = nn.Linear(hidden_size, hidden_size)
self.layer3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
output = F.leaky_relu(self.layer1(x))
output = F.leaky_relu(self.layer2(output))
output = self.layer3(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
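# Usage sketch (illustrative addition): nn.Linear acts on the last dimension,
# so the 4x4x4x4 input maps to a 4x4x4x4 output with these sizes.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    net = NeuralNet(*init_args, **init_kwargs)
    out = net(*get_inputs())
    print(out.shape)  # should be torch.Size([4, 4, 4, 4])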
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf3, primals_5, buf4,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = buf3
del buf3
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), primals_6, primals_4
class NeuralNetNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(NeuralNetNew, self).__init__()
self.layer1 = nn.Linear(input_size, hidden_size)
self.layer2 = nn.Linear(hidden_size, hidden_size)
self.layer3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Silent-Zebra/sequential_social_dilemma_games
|
NeuralNet
| false
| 2,842
|
[
"MIT"
] | 0
|
8cf8faebf7de727bac55bd8020be7cd9cc243ccc
|
https://github.com/Silent-Zebra/sequential_social_dilemma_games/tree/8cf8faebf7de727bac55bd8020be7cd9cc243ccc
|
cSE
|
import torch
import torch.nn as nn
class cSE(nn.Module):
def __init__(self, out_channels):
super().__init__()
self.linear1 = nn.Linear(in_features=out_channels, out_features=int
(out_channels / 2), bias=False)
self.linear2 = nn.Linear(in_features=int(out_channels / 2),
out_features=out_channels, bias=False)
def forward(self, x):
b, c, _, _ = x.size()
y = nn.AdaptiveAvgPool2d(1)(x).view(b, c)
y = self.linear1(y)
y = torch.relu(y)
y = self.linear2(y)
y = torch.sigmoid(y).view(b, c, 1, 1)
return x * y.expand_as(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_channels': 4}]
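# Usage sketch (illustrative addition): channel squeeze-and-excitation rescales
# each channel by a sigmoid gate in (0, 1), so the output keeps the input shape.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    se = cSE(*init_args, **init_kwargs)
    out = se(*get_inputs())
    print(out.shape)  # should be torch.Size([4, 4, 4, 4])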
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (4, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(8)](buf3, 8, XBLOCK=8, num_warps=1,
num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_3, (2, 4), (1, 2
), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0
), buf3, buf4, primals_3
class cSENew(nn.Module):
def __init__(self, out_channels):
super().__init__()
self.linear1 = nn.Linear(in_features=out_channels, out_features=int
(out_channels / 2), bias=False)
self.linear2 = nn.Linear(in_features=int(out_channels / 2),
out_features=out_channels, bias=False)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Soo95/segmentation_models.pytorch
|
cSE
| false
| 2,843
|
[
"MIT"
] | 0
|
9131b336d6939dfabbadecd0d56d382283f46803
|
https://github.com/Soo95/segmentation_models.pytorch/tree/9131b336d6939dfabbadecd0d56d382283f46803
|
AUXModule
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AUXModule(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = nn.Linear(in_features, out_features)
def forward(self, x):
x = F.adaptive_max_pool2d(x, output_size=(1, 1))
x = x.view(-1, x.size(1))
x = self.linear(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
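# Usage sketch (illustrative addition): adaptive max pooling to 1x1 collapses
# the spatial dims, and view(-1, channels) yields one row per batch element here.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    aux = AUXModule(*init_args, **init_kwargs)
    out = aux(*get_inputs())
    print(out.shape)  # should be torch.Size([4, 4]) for the helper shapes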
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x0, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0[grid(16)](primals_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf1)
del primals_2
del primals_3
return buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
class AUXModuleNew(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = nn.Linear(in_features, out_features)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Soo95/segmentation_models.pytorch
|
AUXModule
| false
| 2,844
|
[
"MIT"
] | 0
|
9131b336d6939dfabbadecd0d56d382283f46803
|
https://github.com/Soo95/segmentation_models.pytorch/tree/9131b336d6939dfabbadecd0d56d382283f46803
|
h_tanh
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class h_tanh(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_tanh, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, x):
return self.relu(x + 3) * self.h_max / 3 - self.h_max
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
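# Usage sketch (illustrative addition): h_tanh is ReLU6(x + 3) * h_max / 3 -
# h_max, a piecewise-linear tanh approximation bounded in [-h_max, h_max].
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    act = h_tanh(*init_args, **init_kwargs)
    y = act(get_inputs()[0])
    print(y.shape, float(y.min()) >= -1.0, float(y.max()) <= 1.0)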
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = 0.3333333333333333
tmp10 = tmp8 * tmp9
tmp11 = tmp10 - tmp7
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_sub_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_tanhNew(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_tanhNew, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
SpectrePrediction/micronet
|
h_tanh
| false
| 2,845
|
[
"MIT"
] | 0
|
f56269c7a8744f750e9870f0baa9fb6e68f27b9c
|
https://github.com/SpectrePrediction/micronet/tree/f56269c7a8744f750e9870f0baa9fb6e68f27b9c
|
PolarNet
|
import torch
import torch.utils.data
import torch.nn as nn
class PolarNet(torch.nn.Module):
def __init__(self, num_hid):
super(PolarNet, self).__init__()
self.layer1 = nn.Linear(2, num_hid)
self.layer2 = nn.Linear(num_hid, 1)
def forward(self, input):
r = torch.sqrt(input[:, 0] * input[:, 0] + input[:, 1] * input[:, 1]
).view(-1, 1)
a = torch.atan2(input[:, 1], input[:, 0]).view(-1, 1)
input_polar = torch.cat((r, a), 1).view(-1, 2)
self.hid1 = torch.tanh(self.layer1(input_polar))
output = self.layer2(self.hid1)
output = torch.sigmoid(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_hid': 4}]
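# Usage sketch (illustrative addition): the forward pass reads input[:, 0] and
# input[:, 1] and flattens everything into (r, theta) pairs, so a 4x4x4x4 input
# becomes 64 rows with a single sigmoid output each.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    net = PolarNet(*init_args, **init_kwargs)
    out = net(*get_inputs())
    print(out.shape)  # should be torch.Size([64, 1])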
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * (x1 // 16) + x1 % 16), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr0 + (16 + 64 * (x1 // 16) + x1 % 16), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp16 = tl.load(in_ptr0 + (16 + 64 * (x1 // 16) + x1 % 16), tmp13 &
xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr0 + (64 * (x1 // 16) + x1 % 16), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = libdevice.atan2(tmp16, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp12, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 2), (2, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_view_0[grid(128)](primals_1, buf0, 128, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (2, 4), (1, 2
), 0), out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_tanh_1[grid(256)](buf2, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 1), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_sigmoid_2[grid(64)](buf4, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
return buf4, buf2, buf0, buf2, buf4, primals_4
class PolarNetNew(torch.nn.Module):
def __init__(self, num_hid):
super(PolarNetNew, self).__init__()
self.layer1 = nn.Linear(2, num_hid)
self.layer2 = nn.Linear(num_hid, 1)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Spacider/comp9444_assignment
|
PolarNet
| false
| 2,846
|
[
"Apache-2.0"
] | 0
|
149db9a562c579d03b3ea06c9de2020c8f3ef310
|
https://github.com/Spacider/comp9444_assignment/tree/149db9a562c579d03b3ea06c9de2020c8f3ef310
|
Conv2dLayer
|
import torch
import torch.nn as nn
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class Conv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True):
super(Conv2dLayer, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'ln':
self.norm = LayerNorm(out_channels)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if sn:
self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding=0, dilation=dilation))
else:
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=0, dilation=dilation)
def forward(self, x):
x = self.pad(x)
x = self.conv2d(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
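# Usage sketch (illustrative addition): with the default sn=True the conv weight
# is spectrally normalized by one power iteration per forward call; padding=0
# and kernel_size=4 reduce the 4x4 input to 1x1 spatially.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    layer = Conv2dLayer(*init_args, **init_kwargs)
    out = layer(*get_inputs())
    print(out.shape)  # should be torch.Size([4, 4, 1, 1])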
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mv_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.load(in_ptr0 + (64 + r0), None)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (128 + r0), None)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (192 + r0), None)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-12
tmp25 = tmp23 + tmp24
tmp26 = tmp18 / tmp25
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None)
@triton.jit
def triton_per_fused_div_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4 = tmp1 / tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_2(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1e-12
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None)
@triton.jit
def triton_per_fused_dot_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64,), (1,), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
buf16 = empty_strided_cuda((64,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mv_0[grid(1)](buf2,
primals_4, primals_2, buf0, buf16, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_div_mv_1[grid(4)](primals_4, buf0, buf2, buf3, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_2[grid(1)](buf3, buf5,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_dot_3[grid(1)](buf5, buf3, buf6, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_4[grid(256)](primals_4, buf6, buf7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(primals_1, buf7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1, 1), (4, 1, 1, 1))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(16)
](buf9, primals_5, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf11 = torch.ops.aten.set_.source_Tensor(primals_2, buf5)
assert_size_stride(buf11, (4,), (1,))
del buf0
buf17 = torch.ops.aten.set_.source_Tensor(primals_3, buf16)
assert_size_stride(buf17, (64,), (1,))
del primals_3
return (buf9, buf7, primals_1, primals_2, primals_4, buf2, buf5, buf6,
buf7, buf10)
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class Conv2dLayerNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True):
super(Conv2dLayerNew, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'ln':
self.norm = LayerNorm(out_channels)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if sn:
self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding=0, dilation=dilation))
else:
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=0, dilation=dilation)
def forward(self, input_0):
primals_2 = self.conv2d.module.bias
primals_5 = self.conv2d.module.weight_u
primals_3 = self.conv2d.module.weight_v
primals_1 = self.conv2d.module.weight_bar
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
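# Minimal usage sketch (an illustrative addition, not part of the original record):
# init args (4, 4, 4) are inferred from the shapes asserted in call(), and a CUDA
# device is assumed since the Triton kernels run on GPU.
if __name__ == '__main__':
    layer = Conv2dLayerNew(4, 4, 4).cuda()
    y = layer(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape)  # torch.Size([4, 4, 1, 1]): a 4x4 kernel over a 4x4 map, no padding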
|
Sheroa/Video_Colorization
|
Conv2dLayer
| false
| 2,847
|
[
"MIT"
] | 0
|
5c772ac0ec944814cd8be0a94b0746116b11ac01
|
https://github.com/Sheroa/Video_Colorization/tree/5c772ac0ec944814cd8be0a94b0746116b11ac01
|
NetLin
|
import torch
import torch.utils.data
import torch.nn.functional as F
import torch.nn as nn
class NetLin(nn.Module):
def __init__(self):
super(NetLin, self).__init__()
self.liner1 = nn.Linear(28 * 28, 10)
def forward(self, x):
x = x.view(-1, 784)
output = self.liner1(x)
output = F.log_softmax(input=output, dim=1)
return output
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__log_softmax_0(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
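    # Numerically stable row-wise log-softmax:
    # out = (x - max(x)) - log(sum(exp(x - max(x)))) over each row of 10 logits.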
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (10, 784), (784, 1))
assert_size_stride(primals_3, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (784, 10), (1, 784), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__log_softmax_0[grid(4)](buf0, buf3, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf0
return buf3, primals_1, buf3
class NetLinNew(nn.Module):
def __init__(self):
super(NetLinNew, self).__init__()
self.liner1 = nn.Linear(28 * 28, 10)
def forward(self, input_0):
primals_2 = self.liner1.weight
primals_3 = self.liner1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
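# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; a CUDA device is assumed because
# call() launches the Triton log-softmax kernel on GPU.
if __name__ == '__main__':
    model = NetLinNew().cuda()
    logp = model(torch.rand([4, 784], device='cuda'))
    print(logp.shape)  # torch.Size([4, 10]); each row exponentiates to a distribution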
|
Spacider/comp9444_assignment
|
NetLin
| false
| 2,848
|
[
"Apache-2.0"
] | 0
|
149db9a562c579d03b3ea06c9de2020c8f3ef310
|
https://github.com/Spacider/comp9444_assignment/tree/149db9a562c579d03b3ea06c9de2020c8f3ef310
|
EncModel
|
import torch
import torch.utils.data
class EncModel(torch.nn.Module):
def __init__(self, num_input, num_hid, num_out):
super(EncModel, self).__init__()
self.in_hid = torch.nn.Linear(num_input, num_hid)
self.hid_out = torch.nn.Linear(num_hid, num_out)
def forward(self, input):
hid_sum = self.in_hid(input)
hidden = torch.tanh(hid_sum)
out_sum = self.hid_out(hidden)
output = torch.sigmoid(out_sum)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_input': 4, 'num_hid': 4, 'num_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
class EncModelNew(torch.nn.Module):
def __init__(self, num_input, num_hid, num_out):
super(EncModelNew, self).__init__()
self.in_hid = torch.nn.Linear(num_input, num_hid)
self.hid_out = torch.nn.Linear(num_hid, num_out)
def forward(self, input_0):
primals_1 = self.in_hid.weight
primals_2 = self.in_hid.bias
primals_4 = self.hid_out.weight
primals_5 = self.hid_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
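# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; assumes a CUDA device since the
# fused tanh/sigmoid kernels are Triton-compiled.
if __name__ == '__main__':
    model = EncModelNew(4, 4, 4).cuda()
    out = model(torch.rand([4, 4, 4, 4], device='cuda'))
    print(out.shape)  # torch.Size([4, 4, 4, 4]); sigmoid keeps outputs in (0, 1)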
|
Spacider/comp9444_assignment
|
EncModel
| false
| 2,849
|
[
"Apache-2.0"
] | 0
|
149db9a562c579d03b3ea06c9de2020c8f3ef310
|
https://github.com/Spacider/comp9444_assignment/tree/149db9a562c579d03b3ea06c9de2020c8f3ef310
|
NetFull
|
import torch
import torch.utils.data
import torch.nn.functional as F
import torch.nn as nn
class NetFull(nn.Module):
def __init__(self):
super(NetFull, self).__init__()
self.liner1 = nn.Linear(28 * 28, 400)
self.liner2 = nn.Linear(400, 200)
self.liner3 = nn.Linear(200, 10)
def forward(self, x):
x = x.view(-1, 784)
        input_output = torch.tanh(self.liner1(x))
        hidden_1_output = torch.tanh(self.liner2(input_output))
hidden_2_output = self.liner3(hidden_1_output)
output = F.log_softmax(input=hidden_2_output, dim=1)
return output
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_per_fused__log_softmax_2(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (200, 400), (400, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (10, 200), (200, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(1600)](buf1, primals_3, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (400, 200), (
1, 400), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_tanh_1[grid(800)](buf3, primals_5, 800, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(200, 10), (1, 200), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_2[grid(4)](buf4, buf7, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf4
return buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4
class NetFullNew(nn.Module):
def __init__(self):
super(NetFullNew, self).__init__()
self.liner1 = nn.Linear(28 * 28, 400)
self.liner2 = nn.Linear(400, 200)
self.liner3 = nn.Linear(200, 10)
def forward(self, input_0):
primals_2 = self.liner1.weight
primals_3 = self.liner1.bias
primals_4 = self.liner2.weight
primals_5 = self.liner2.bias
primals_6 = self.liner3.weight
primals_7 = self.liner3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
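# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; assumes a CUDA device for the
# Triton tanh and log-softmax kernels.
if __name__ == '__main__':
    model = NetFullNew().cuda()
    logp = model(torch.rand([4, 784], device='cuda'))
    print(logp.shape)  # torch.Size([4, 10])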
|
Spacider/comp9444_assignment
|
NetFull
| false
| 2,850
|
[
"Apache-2.0"
] | 0
|
149db9a562c579d03b3ea06c9de2020c8f3ef310
|
https://github.com/Spacider/comp9444_assignment/tree/149db9a562c579d03b3ea06c9de2020c8f3ef310
|
UpsampleConv
|
import torch
import torch.nn as nn
def spectral_norm(layer, n_iters=1):
return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters)
class UpsampleConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
spec_norm=False):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
self.conv = spectral_norm(self.conv)
self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
def forward(self, inputs):
output = inputs
output = torch.cat([output, output, output, output], dim=1)
output = self.pixelshuffle(output)
return self.conv(output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pixel_shuffle_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8 % 2
x3 = xindex // 16 % 4
x5 = xindex // 256
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3 + 16 * x0 + 32 * x2 + 64 * x5),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x6, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_pixel_shuffle_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 8,
8), (256, 64, 8, 1), 0), primals_2, stride=(1, 1), padding=(1,
1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64,
8, 1), 0)
def spectral_norm(layer, n_iters=1):
return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters)
class UpsampleConvNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
spec_norm=False):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
self.conv = spectral_norm(self.conv)
self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
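# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; assumes a CUDA device.
# Channel replication + PixelShuffle(2) doubles H and W before the 3x3 conv.
if __name__ == '__main__':
    up = UpsampleConvNew(4, 4).cuda()
    y = up(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape)  # torch.Size([4, 4, 8, 8])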
|
Sriram-Ravula/Inverse_Meta
|
UpsampleConv
| false
| 2,851
|
[
"MIT"
] | 0
|
c6c1e4ae0d670093156249c60d74373b22d61f01
|
https://github.com/Sriram-Ravula/Inverse_Meta/tree/c6c1e4ae0d670093156249c60d74373b22d61f01
|
VarianceNorm2d
|
import torch
import torch.nn as nn
class VarianceNorm2d(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, x):
        var = torch.var(x, dim=(2, 3), keepdim=True)
        h = x / torch.sqrt(var + 1e-05)
out = self.alpha.view(-1, self.num_features, 1, 1) * h
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mul_sqrt_var_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
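    # Fuses VarianceNorm2d in one pass: per-(n, c) unbiased variance over the 16
    # spatial elements (note the /15 below), then alpha * x / sqrt(var + 1e-5).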
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 15.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp23 = tmp0 / tmp21
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp24, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mul_sqrt_var_0[grid(16)](buf3, primals_1,
primals_2, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_2
return buf4, primals_1, buf3
class VarianceNorm2dNew(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, input_0):
primals_2 = self.alpha
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
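# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; assumes a CUDA device for the
# fused variance-normalization kernel.
if __name__ == '__main__':
    norm = VarianceNorm2dNew(4).cuda()
    y = norm(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape)  # torch.Size([4, 4, 4, 4])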
|
Sriram-Ravula/Inverse_Meta
|
VarianceNorm2d
| false
| 2,852
|
[
"MIT"
] | 0
|
c6c1e4ae0d670093156249c60d74373b22d61f01
|
https://github.com/Sriram-Ravula/Inverse_Meta/tree/c6c1e4ae0d670093156249c60d74373b22d61f01
|
ResConv2dLayer
|
import torch
import torch.nn as nn
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class Conv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True):
super(Conv2dLayer, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'ln':
self.norm = LayerNorm(out_channels)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if sn:
self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding=0, dilation=dilation))
else:
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=0, dilation=dilation)
def forward(self, x):
x = self.pad(x)
x = self.conv2d(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class ResConv2dLayer(nn.Module):
def __init__(self, channels, kernel_size=3, stride=1, padding=1,
dilation=1, pad_type='zero', activation='lrelu', norm='none', sn=True):
super(ResConv2dLayer, self).__init__()
self.conv2d = Conv2dLayer(channels, channels, kernel_size, stride,
padding, dilation, pad_type, activation, norm, sn)
def forward(self, x):
residual = x
out = self.conv2d(x)
out = out + residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
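    # One v-step of the spectral-norm power iteration: out_ptr0 <- W^T u,
    # in_out_ptr0 <- ||W^T u|| + eps, out_ptr1 <- the L2-normalized v.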
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.load(in_ptr0 + (36 + r0), rmask, other=0.0)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (72 + r0), rmask, other=0.0)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (108 + r0), rmask, other=0.0)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(rmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-12
tmp26 = tmp24 + tmp25
tmp27 = tmp18 / tmp26
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, rmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp27, rmask)
@triton.jit
def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 36 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4 = tmp1 / tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1e-12
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None)
@triton.jit
def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_6(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (36,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((36,), (1,), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
buf17 = empty_strided_cuda((36,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf3,
primals_4, primals_2, buf1, buf17, 1, 36, XBLOCK=1, num_warps=2,
num_stages=1)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_div_mv_2[grid(4)](primals_4, buf1, buf3, buf4, 4,
36, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf4, buf6,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_dot_4[grid(1)](buf6, buf4, buf7, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_div_5[grid(144)](primals_4, buf7, buf8, 144,
XBLOCK=128, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_6[grid
(256)](buf9, primals_5, primals_1, buf10, buf11, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf9
del primals_1
del primals_5
buf12 = torch.ops.aten.set_.source_Tensor(primals_2, buf6)
assert_size_stride(buf12, (4,), (1,))
del buf1
buf18 = torch.ops.aten.set_.source_Tensor(primals_3, buf17)
assert_size_stride(buf18, (36,), (1,))
del primals_3
return (buf10, buf8, primals_2, primals_4, buf0, buf3, buf6, buf7, buf8,
buf11)
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class Conv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True):
super(Conv2dLayer, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'ln':
self.norm = LayerNorm(out_channels)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if sn:
self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding=0, dilation=dilation))
else:
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=0, dilation=dilation)
def forward(self, x):
x = self.pad(x)
x = self.conv2d(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class ResConv2dLayerNew(nn.Module):
def __init__(self, channels, kernel_size=3, stride=1, padding=1,
dilation=1, pad_type='zero', activation='lrelu', norm='none', sn=True):
super(ResConv2dLayerNew, self).__init__()
self.conv2d = Conv2dLayer(channels, channels, kernel_size, stride,
padding, dilation, pad_type, activation, norm, sn)
def forward(self, input_0):
primals_2 = self.conv2d.conv2d.module.bias
primals_5 = self.conv2d.conv2d.module.weight_u
primals_3 = self.conv2d.conv2d.module.weight_v
primals_4 = self.conv2d.conv2d.module.weight_bar
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
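# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; assumes a CUDA device.
# kernel_size=3 with padding=1 preserves the spatial size, so the residual add
# is shape-compatible.
if __name__ == '__main__':
    block = ResConv2dLayerNew(4).cuda()
    y = block(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape)  # torch.Size([4, 4, 4, 4])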
|
Sheroa/Video_Colorization
|
ResConv2dLayer
| false
| 2,853
|
[
"MIT"
] | 0
|
5c772ac0ec944814cd8be0a94b0746116b11ac01
|
https://github.com/Sheroa/Video_Colorization/tree/5c772ac0ec944814cd8be0a94b0746116b11ac01
|
Attention
|
import torch
import torch.nn as nn
class Attention(nn.Module):
def __init__(self, encoder_dim, hidden_dim):
super(Attention, self).__init__()
self.hidden_lin = nn.Linear(hidden_dim, hidden_dim)
self.tanh = nn.Tanh()
self.img_lin = nn.Linear(encoder_dim, hidden_dim)
self.softmax = nn.Softmax(dim=1)
self.concat_lin = nn.Linear(hidden_dim, 1)
def forward(self, img_features, hidden_state):
hidden_h = self.hidden_lin(hidden_state).unsqueeze(1)
img_s = self.img_lin(img_features)
att_ = self.tanh(img_s + hidden_h)
e_ = self.concat_lin(att_).squeeze(2)
alpha = self.softmax(e_)
context_vec = (img_features * alpha.unsqueeze(2)).sum(1)
return context_vec, alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_dim': 4, 'hidden_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(out_ptr0 + x6, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x1 = xindex // 4 % 16
x3 = xindex // 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp0 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp0 * tmp9
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + x5, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(1024)](buf1, primals_5, buf0,
primals_2, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_sum_3[grid(1024)](primals_6, buf6, buf7, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
return buf7, buf6, primals_6, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0), buf2, buf6, primals_7
class AttentionNew(nn.Module):
def __init__(self, encoder_dim, hidden_dim):
super(AttentionNew, self).__init__()
self.hidden_lin = nn.Linear(hidden_dim, hidden_dim)
self.tanh = nn.Tanh()
self.img_lin = nn.Linear(encoder_dim, hidden_dim)
self.softmax = nn.Softmax(dim=1)
self.concat_lin = nn.Linear(hidden_dim, 1)
def forward(self, input_0, input_1):
primals_1 = self.hidden_lin.weight
primals_2 = self.hidden_lin.bias
primals_4 = self.img_lin.weight
primals_5 = self.img_lin.bias
primals_7 = self.concat_lin.weight
primals_8 = self.concat_lin.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
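# Minimal usage sketch (an illustrative addition, not part of the original record),
# mirroring get_inputs()/get_init_inputs() above; assumes a CUDA device. With these
# toy 4-D inputs the additive-attention broadcast produces 5-D outputs, matching
# buf7 and buf6 in call().
if __name__ == '__main__':
    attn = AttentionNew(4, 4).cuda()
    ctx, alpha = attn(torch.rand([4, 4, 4, 4], device='cuda'),
                      torch.rand([4, 4, 4, 4], device='cuda'))
    print(ctx.shape, alpha.shape)  # (4, 4, 4, 4, 4) and (4, 4, 4, 4, 1)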
|
Soumya1612-Rasha/Image-Captioning
|
Attention
| false
| 2,854
|
[
"MIT"
] | 0
|
63439754567ced2dbe762aed150ba5476029781c
|
https://github.com/Soumya1612-Rasha/Image-Captioning/tree/63439754567ced2dbe762aed150ba5476029781c
|
TransposeConv2dLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class Conv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True):
super(Conv2dLayer, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'ln':
self.norm = LayerNorm(out_channels)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if sn:
self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding=0, dilation=dilation))
else:
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=0, dilation=dilation)
def forward(self, x):
x = self.pad(x)
x = self.conv2d(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class TransposeConv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True, scale_factor=2):
super(TransposeConv2dLayer, self).__init__()
self.scale_factor = scale_factor
self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size,
stride, padding, dilation, pad_type, activation, norm, sn)
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode='nearest')
x = self.conv2d(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.load(in_ptr0 + (64 + r0), None)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp9 = tl.load(in_ptr0 + (128 + r0), None)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (192 + r0), None)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-12
tmp25 = tmp23 + tmp24
tmp26 = tmp18 / tmp25
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None)
@triton.jit
def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4 = tmp1 / tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1e-12
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None)
@triton.jit
def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x3, tmp7, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64,), (1,), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
buf17 = empty_strided_cuda((64,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf3,
primals_4, primals_2, buf1, buf17, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_div_mv_2[grid(4)](primals_4, buf1, buf3, buf4, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf4, buf6,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_dot_4[grid(1)](buf6, buf4, buf7, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_5[grid(256)](primals_4, buf7, buf8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 5, 5), (100, 25, 5, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(400)
](buf10, primals_5, buf11, 400, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
buf12 = torch.ops.aten.set_.source_Tensor(primals_2, buf6)
assert_size_stride(buf12, (4,), (1,))
del buf1
buf18 = torch.ops.aten.set_.source_Tensor(primals_3, buf17)
assert_size_stride(buf18, (64,), (1,))
del primals_3
return (buf10, buf8, primals_2, primals_4, buf0, buf3, buf6, buf7, buf8,
buf11)
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + '_u')
v = getattr(self.module, self.name + '_v')
w = getattr(self.module, self.name + '_bar')
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + '_u')
getattr(self.module, self.name + '_v')
getattr(self.module, self.name + '_bar')
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + '_u', u)
self.module.register_parameter(self.name + '_v', v)
self.module.register_parameter(self.name + '_bar', w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
class Conv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True):
super(Conv2dLayer, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
if norm == 'bn':
self.norm = nn.BatchNorm2d(out_channels)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(out_channels)
elif norm == 'ln':
self.norm = LayerNorm(out_channels)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if sn:
self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding=0, dilation=dilation))
else:
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=0, dilation=dilation)
def forward(self, x):
x = self.pad(x)
x = self.conv2d(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class TransposeConv2dLayerNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
'none', sn=True, scale_factor=2):
super(TransposeConv2dLayerNew, self).__init__()
self.scale_factor = scale_factor
self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size,
stride, padding, dilation, pad_type, activation, norm, sn)
def forward(self, input_0):
primals_2 = self.conv2d.conv2d.module.bias
primals_5 = self.conv2d.conv2d.module.weight_u
primals_3 = self.conv2d.conv2d.module.weight_v
primals_1 = self.conv2d.conv2d.module.weight_bar
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Sheroa/Video_Colorization
|
TransposeConv2dLayer
| false
| 2,855
|
[
"MIT"
] | 0
|
5c772ac0ec944814cd8be0a94b0746116b11ac01
|
https://github.com/Sheroa/Video_Colorization/tree/5c772ac0ec944814cd8be0a94b0746116b11ac01
|
ConvMeanPool
|
import torch
import torch.nn as nn
def spectral_norm(layer, n_iters=1):
return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters)
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False, spec_norm=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
conv = spectral_norm(conv)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
conv = spectral_norm(conv)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return output
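# Illustrative sanity check (an assumption for clarity, not part of the
# original module): the four strided slices enumerate the 2x2 blocks of the
# conv output, so the sum / 4.0 matches a 2x2 average pool on even-sized maps.
def _check_mean_pool(x=None):
    x = torch.rand(2, 3, 8, 8) if x is None else x
    pooled = sum([x[:, :, ::2, ::2], x[:, :, 1::2, ::2], x[:, :, ::2, 1::2],
        x[:, :, 1::2, 1::2]]) / 4.0
    assert torch.allclose(pooled, torch.nn.functional.avg_pool2d(x, 2))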
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x4 = xindex // 2
x2 = xindex // 4 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), xmask, eviction_policy
='evict_last')
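    # The four taps at offsets 0, 1, 4 and 5 (on a width-4 row-major map) are
    # the 2x2 neighborhood of each output pixel; the conv bias (in_ptr1) is
    # added to every tap before the * 0.25, fusing bias-add with mean-pooling.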
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp1
tmp7 = tmp4 + tmp6
tmp9 = tmp8 + tmp1
tmp10 = tmp7 + tmp9
tmp12 = tmp11 + tmp1
tmp13 = tmp10 + tmp12
tmp14 = 0.25
tmp15 = tmp13 * tmp14
tl.store(out_ptr0 + x5, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](buf0, primals_2, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3
def spectral_norm(layer, n_iters=1):
return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters)
class ConvMeanPoolNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False, spec_norm=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
conv = spectral_norm(conv)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
conv = spectral_norm(conv)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sriram-Ravula/ncsnv2
|
ConvMeanPool
| false
| 2,856
|
[
"MIT"
] | 0
|
f610b59441a34063fae1c02aa06837b7eec95c03
|
https://github.com/Sriram-Ravula/ncsnv2/tree/f610b59441a34063fae1c02aa06837b7eec95c03
|
Encoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N):
super(Encoder, self).__init__()
self.L, self.N = L, N
self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias=
False)
def forward(self, mixture):
"""
Args:
mixture: [M, T], M is batch size, T is #samples
Returns:
mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1
"""
mixture = torch.unsqueeze(mixture, 1)
mixture_w = F.relu(self.conv1d_U(mixture))
return mixture_w
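# Shape sketch (illustrative assumption, not part of the original module):
# with kernel L, stride L // 2 and no padding, K = (T - L) // (L // 2) + 1,
# which equals 2 * T / L - 1 when L divides T. E.g. T=16, L=4 gives
# K = 12 // 2 + 1 = 7 = 2 * 16 / 4 - 1.
def _check_encoder_shape(M=4, T=16, L=4, N=8):
    enc = Encoder(L, N)
    mixture_w = enc(torch.rand(M, T))
    assert mixture_w.shape == (M, N, 2 * T // L - 1)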
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'L': 4, 'N': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
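    # The stores below write the ReLU result in place and a boolean mask of the
    # zeroed positions, which the threshold_backward pass reuses as its mask.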
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (4, 4, 1), (4, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 0), buf2
class EncoderNew(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N):
super(EncoderNew, self).__init__()
self.L, self.N = L, N
self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias=
False)
def forward(self, input_0):
primals_2 = self.conv1d_U.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Stanwang1210/HW3_1_Source_Seperation
|
Encoder
| false
| 2,857
|
[
"MIT"
] | 0
|
8c05850fa4f0f0845c460f9afd06fd8fe3e29dc9
|
https://github.com/Stanwang1210/HW3_1_Source_Seperation/tree/8c05850fa4f0f0845c460f9afd06fd8fe3e29dc9
|
CombineSlices
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
class CombineSlices(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, x):
return torch.index_select(x, dim=self.slice_dim, index=torch.tensor
(0, device=x.device))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
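    # index_select(dim=2, index=0) degenerates to a strided copy: for each
    # (n, c, w) the kernel reads element (n, c, 0, w) at offset x0 + 16 * x1.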
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_select_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class CombineSlicesNew(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Samuel-van-Gurp/fastMRI
|
CombineSlices
| false
| 2,858
|
[
"MIT"
] | 0
|
0b1884a1c218961f81199144057ffcfde29a86ad
|
https://github.com/Samuel-van-Gurp/fastMRI/tree/0b1884a1c218961f81199144057ffcfde29a86ad
|
JS_div
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class JS_div(nn.Module):
def __init__(self, margin=0.1):
super(JS_div, self).__init__()
self.margin = margin
self.dist = nn.CosineSimilarity(dim=0)
self.KLDivloss = nn.KLDivLoss(reduction='batchmean')
def forward(self, feat1, feat2, get_softmax=True):
        if get_softmax:
            feat11 = F.softmax(feat1, dim=1)
            feat22 = F.softmax(feat2, dim=1)
        else:
            feat11, feat22 = feat1, feat2
log_mean_output = ((feat11 + feat22) / 2).log()
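        # With log-space input, self.KLDivloss(log_m, p) computes KL(p || m),
        # so dis_final below is (KL(P||M) + KL(Q||M)) / 2 with M = (P + Q) / 2,
        # i.e. the Jensen-Shannon divergence between the two softmax outputs.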
dis_final = (self.KLDivloss(log_mean_output, feat11) + self.
KLDivloss(log_mean_output, feat22)) / 2
return dis_final
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused__softmax_add_div_log_mul_sub_sum_xlogy_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + r3, None)
tmp10 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = libdevice.isnan(tmp8).to(tl.int1)
tmp19 = 0.0
tmp20 = tmp8 == tmp19
tmp21 = tl_math.log(tmp8)
tmp22 = tmp8 * tmp21
tmp23 = tl.where(tmp20, tmp19, tmp22)
tmp24 = float('nan')
tmp25 = tl.where(tmp18, tmp24, tmp23)
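    # xlogy semantics for the KL term: 0 * log(0) is treated as 0 and NaN
    # inputs propagate, so tmp25 is p * log(p) with the edge cases handled.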
tmp26 = tmp8 + tmp17
tmp27 = 0.5
tmp28 = tmp26 * tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp8 * tmp29
tmp31 = tmp25 - tmp30
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp35 = libdevice.isnan(tmp17).to(tl.int1)
tmp36 = tmp17 == tmp19
tmp37 = tl_math.log(tmp17)
tmp38 = tmp17 * tmp37
tmp39 = tl.where(tmp36, tmp19, tmp38)
tmp40 = tl.where(tmp35, tmp24, tmp39)
tmp41 = tmp17 * tmp29
tmp42 = tmp40 - tmp41
tmp43 = tl.broadcast_to(tmp42, [RBLOCK])
tmp45 = triton_helpers.promote_to_tensor(tl.sum(tmp43, 0))
tmp46 = 0.25
tmp47 = tmp34 * tmp46
tmp48 = tmp45 * tmp46
tmp49 = tmp47 + tmp48
tmp50 = tmp49 * tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp50, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf6 = buf4
del buf4
triton_per_fused__softmax_add_div_log_mul_sub_sum_xlogy_1[grid(1)](buf6
, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf6,
class JS_divNew(nn.Module):
def __init__(self, margin=0.1):
super(JS_divNew, self).__init__()
self.margin = margin
self.dist = nn.CosineSimilarity(dim=0)
self.KLDivloss = nn.KLDivLoss(reduction='batchmean')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Sun915/MCALN
|
JS_div
| false
| 2,859
|
[
"MIT"
] | 0
|
e52600fddc62922148480ab9dce6aefc1d3788eb
|
https://github.com/Sun915/MCALN/tree/e52600fddc62922148480ab9dce6aefc1d3788eb
|
GlobalAttention
|
import torch
from torch import nn
class GlobalAttention(nn.Module):
def __init__(self, dims):
super(GlobalAttention, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(dims, dims, 1)
def forward(self, x, y):
att = torch.sigmoid(self.conv(self.pool(x + y)))
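        # att is a per-channel gate in (0, 1) computed from globally pooled
        # features; the return below is a convex combination of x and y.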
return x * att + y * (1 - att)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dims': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = 1.0
tmp6 = tmp5 - tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
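    # Fused sigmoid gate and blend: tmp8 = x * att + y * (1 - att).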
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_0[grid(16)](buf1, primals_1, primals_2,
16, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(16)](buf3, primals_4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_2[grid(256)](primals_1, buf3,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf4, primals_1, primals_2, primals_3, buf1, buf3
class GlobalAttentionNew(nn.Module):
def __init__(self, dims):
super(GlobalAttentionNew, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(dims, dims, 1)
def forward(self, input_0, input_1):
primals_3 = self.conv.weight
primals_4 = self.conv.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
StefOe/selection-masks
|
GlobalAttention
| false
| 2,860
|
[
"BSD-2-Clause"
] | 0
|
e59487bffe3c30bdab7a6425bed01f6adeda4f67
|
https://github.com/StefOe/selection-masks/tree/e59487bffe3c30bdab7a6425bed01f6adeda4f67
|
GAT
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import *
from time import *
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
    Graph attention layer
input: (B,N,C_in)
output: (B,N,C_out)
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, inp, adj):
"""
        inp: input_fea [B, N, in_features]; in_features is the number of
            elements in each node's input feature vector
        adj: adjacency matrix of the graph [N, N], nonzero (i.e. one) where an
            edge exists and zero otherwise
"""
h = torch.matmul(inp, self.W)
N = h.size()[1]
a_input = torch.cat([h.repeat(1, 1, N).view(-1, N * N, self.
out_features), h.repeat(1, N, 1)], dim=-1).view(-1, N, N, 2 *
self.out_features)
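        # a_input[b, i, j] = [h_i || h_j]: the two repeats enumerate all N * N
        # node pairs, so e[b, i, j] below scores the edge from node i to node j.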
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -1000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GAT(nn.Module):
def __init__(self, n_feat, n_hid, n_class, dropout, alpha=0.2, n_heads=4):
"""Dense version of GAT
        n_heads is the number of GAL (graph attention) layers; their outputs
        are concatenated at the end, similar to self-attention, extracting
        features from different subspaces.
"""
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(n_feat, n_hid, dropout=
dropout, alpha=alpha, concat=True) for _ in range(n_heads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(n_hid * n_heads, n_class,
dropout=dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=2)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_feat': 4, 'n_hid': 4, 'n_class': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from itertools import *
from time import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + (4 * x1 + x0) %
16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (4 + x0), xmask).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0), xmask).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (8 + x0), xmask).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (12 + x0), xmask).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (12 + x0), xmask).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp40 = tl.load(in_ptr3 + x0, xmask).to(tl.int1)
tmp41 = tl.load(in_ptr4 + x0, xmask)
tmp45 = tl.load(in_ptr3 + (4 + x0), xmask).to(tl.int1)
tmp46 = tl.load(in_ptr4 + (4 + x0), xmask)
tmp51 = tl.load(in_ptr3 + (8 + x0), xmask).to(tl.int1)
tmp52 = tl.load(in_ptr4 + (8 + x0), xmask)
tmp57 = tl.load(in_ptr3 + (12 + x0), xmask).to(tl.int1)
tmp58 = tl.load(in_ptr4 + (12 + x0), xmask)
tmp74 = tl.load(in_ptr5 + x0, xmask).to(tl.int1)
tmp75 = tl.load(in_ptr6 + x0, xmask)
tmp79 = tl.load(in_ptr5 + (4 + x0), xmask).to(tl.int1)
tmp80 = tl.load(in_ptr6 + (4 + x0), xmask)
tmp85 = tl.load(in_ptr5 + (8 + x0), xmask).to(tl.int1)
tmp86 = tl.load(in_ptr6 + (8 + x0), xmask)
tmp91 = tl.load(in_ptr5 + (12 + x0), xmask).to(tl.int1)
tmp92 = tl.load(in_ptr6 + (12 + x0), xmask)
tmp108 = tl.load(in_ptr7 + x0, xmask).to(tl.int1)
tmp109 = tl.load(in_ptr8 + x0, xmask)
tmp113 = tl.load(in_ptr7 + (4 + x0), xmask).to(tl.int1)
tmp114 = tl.load(in_ptr8 + (4 + x0), xmask)
tmp119 = tl.load(in_ptr7 + (8 + x0), xmask).to(tl.int1)
tmp120 = tl.load(in_ptr8 + (8 + x0), xmask)
tmp125 = tl.load(in_ptr7 + (12 + x0), xmask).to(tl.int1)
tmp126 = tl.load(in_ptr8 + (12 + x0), xmask)
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -999999995904.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
tl.store(out_ptr2 + x0, tmp62, xmask)
tl.store(out_ptr3 + x0, tmp73, xmask)
tl.store(out_ptr4 + x0, tmp96, xmask)
tl.store(out_ptr5 + x0, tmp107, xmask)
tl.store(out_ptr6 + x0, tmp130, xmask)
tl.store(out_ptr7 + x0, tmp141, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x2, xmask).to(tl.int1)
tmp14 = tl.load(in_out_ptr1 + x2, xmask)
tmp18 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr7 + x2, xmask).to(tl.int1)
tmp24 = tl.load(in_out_ptr2 + x2, xmask)
tmp28 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr9 + x0, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr10 + x2, xmask).to(tl.int1)
tmp34 = tl.load(in_out_ptr3 + x2, xmask)
tmp38 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr12 + x0, xmask, eviction_policy='evict_last')
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -999999995904.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(in_out_ptr0 + x2, tmp12, xmask)
tl.store(in_out_ptr1 + x2, tmp22, xmask)
tl.store(in_out_ptr2 + x2, tmp32, xmask)
tl.store(in_out_ptr3 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_5(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (4 + x0), xmask).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0), xmask).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (8 + x0), xmask).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (12 + x0), xmask).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (12 + x0), xmask).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -999999995904.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
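    # Numerically stable softmax over the four masked, leaky-relu'd logits:
    # tmp28 is the row max and tmp39 the sum of exp(logit - max); the next
    # kernel divides by tmp39 to complete the normalization.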
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -999999995904.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(in_out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((1, 16, 8), (128, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 8), (8, 1), 0),
primals_3, out=buf2)
buf3 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](primals_4, buf4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((1, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf9, buf10, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 8), (8, 1), 0),
primals_6, out=buf11)
buf12 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf11, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((1, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf17, buf18, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 8), (8, 1), 0),
primals_8, out=buf19)
buf20 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf19, buf20, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((1, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf25, buf26, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (16, 8), (8, 1), 0),
primals_10, out=buf27)
buf28 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf27, buf28, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf6 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf13 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf14 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf21 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf22 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf29 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
buf30 = empty_strided_cuda((1, 1, 4), (4, 4, 1), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_2[grid(4)](buf4,
buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5,
buf6, buf13, buf14, buf21, buf22, buf29, buf30, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf2, (1, 4, 4), (16, 4, 1), 0)
del buf2
buf15 = reinterpret_tensor(buf11, (1, 4, 4), (16, 4, 1), 0)
del buf11
buf23 = reinterpret_tensor(buf19, (1, 4, 4), (16, 4, 1), 0)
del buf19
buf31 = reinterpret_tensor(buf27, (1, 4, 4), (16, 4, 1), 0)
del buf27
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf7,
buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13,
buf14, buf20, buf21, buf22, buf28, buf29, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del buf21
del buf22
del buf29
del buf30
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf0,
out=buf8)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (4, 4), (4, 1), 0),
buf9, out=buf16)
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf23, (4, 4), (4, 1), 0),
buf17, out=buf24)
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf31, (4, 4), (4, 1), 0),
buf25, out=buf32)
buf33 = empty_strided_cuda((1, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_cat_4[grid(64)](buf8, buf16, buf24, buf32, buf33,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf33, (4, 16), (16, 1), 0),
primals_11, out=buf34)
buf35 = empty_strided_cuda((1, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(128)](buf34, buf35, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf36 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf35, (16, 8), (8, 1), 0),
primals_12, out=buf36)
buf37 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(16)](buf36, buf37, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf38 = buf6
del buf6
buf39 = buf5
del buf5
triton_poi_fused__softmax_leaky_relu_mul_where_5[grid(4)](buf4,
buf37, buf36, buf38, buf39, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf40 = reinterpret_tensor(buf36, (1, 4, 4), (16, 4, 1), 0)
del buf36
triton_poi_fused__softmax_leaky_relu_mul_where_6[grid(16)](buf40,
buf4, buf37, buf38, buf39, 16, XBLOCK=16, num_warps=1, num_stages=1
)
del buf38
del buf39
buf41 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf40, reinterpret_tensor(buf34, (1, 4, 4), (16,
4, 1), 0), out=buf41)
buf42 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_elu_7[grid(16)](buf41, buf42, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return (buf42, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20,
buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41,
reinterpret_tensor(buf34, (1, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf35, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf33, (16, 4),
(1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0),
reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(
buf26, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8),
(1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(
buf18, (8, 16), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (
1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0),
reinterpret_tensor(buf10, (8, 16), (1, 8), 0), reinterpret_tensor(
primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1,
4), 0), reinterpret_tensor(buf1, (8, 16), (1, 8), 0),
reinterpret_tensor(primals_3, (1, 8), (1, 1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
    Graph attention layer
input: (B,N,C_in)
output: (B,N,C_out)
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, inp, adj):
"""
        inp: input_fea [B, N, in_features]; in_features is the number of
            elements in each node's input feature vector
        adj: adjacency matrix of the graph [N, N], nonzero (i.e. one) where an
            edge exists and zero otherwise
"""
h = torch.matmul(inp, self.W)
N = h.size()[1]
a_input = torch.cat([h.repeat(1, 1, N).view(-1, N * N, self.
out_features), h.repeat(1, N, 1)], dim=-1).view(-1, N, N, 2 *
self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -1000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, n_feat, n_hid, n_class, dropout, alpha=0.2, n_heads=4):
"""Dense version of GAT
        n_heads is the number of GAL (graph attention) layers; their outputs
        are concatenated at the end, similar to self-attention, extracting
        features from different subspaces.
"""
super(GATNew, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(n_feat, n_hid, dropout=
dropout, alpha=alpha, concat=True) for _ in range(n_heads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(n_hid * n_heads, n_class,
dropout=dropout, alpha=alpha, concat=False)
def forward(self, input_0, input_1):
primals_1 = self.attention_0.W
primals_3 = self.attention_0.a
primals_2 = self.attention_1.W
primals_6 = self.attention_1.a
primals_4 = self.attention_2.W
primals_8 = self.attention_2.a
primals_5 = self.attention_3.W
primals_10 = self.attention_3.a
primals_11 = self.out_att.W
primals_12 = self.out_att.a
primals_7 = input_0
primals_9 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
Richard-LYF/SESS-GC
|
GAT
| false
| 2,861
|
[
"MIT"
] | 0
|
2280e5ec8e5c5e20d0bda629b7d05f61bad0bec7
|
https://github.com/Richard-LYF/SESS-GC/tree/2280e5ec8e5c5e20d0bda629b7d05f61bad0bec7
|
InstanceNorm2dPlus
|
import torch
import torch.nn as nn
class InstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False,
track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / torch.sqrt(v + 1e-05)
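        # The "plus": per-sample channel means are standardized across channels
        # and re-injected through alpha below, preserving mean information that
        # plain InstanceNorm would otherwise discard.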
h = self.instance_norm(x)
if self.bias:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1
) * h + self.beta.view(-1, self.num_features, 1, 1)
else:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_mean_0(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
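    # tmp23 = 1 / sqrt(var + eps) is the fused instance-norm inverse std per
    # (n, c) map; tmp4 keeps the raw per-map sums for the channel-mean branch.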
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp23, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_sqrt_sub_var_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp6 = tmp5 / tmp1
tmp7 = tmp4 + tmp6
tmp9 = tmp8 / tmp1
tmp10 = tmp7 + tmp9
tmp12 = tmp11 / tmp1
tmp13 = tmp10 + tmp12
tmp14 = 4.0
tmp15 = tmp13 / tmp14
tmp16 = tmp2 - tmp15
tmp17 = tmp4 - tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp6 - tmp15
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp15
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp12 - tmp15
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tmp30 = 1e-05
tmp31 = tmp29 + tmp30
tmp32 = libdevice.sqrt(tmp31)
tmp33 = tmp16 / tmp32
tl.store(out_ptr0 + x2, tmp33, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x4 = xindex // 16
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp0 * tmp9
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_mean_0[grid(16)](buf5,
primals_1, buf0, buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_mean_sqrt_sub_var_1[grid(16)](buf0, buf1,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(256)](primals_3, primals_1, buf2,
buf5, buf1, primals_2, primals_4, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
return (buf6, primals_1, primals_2, primals_3, buf2, buf5,
reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0))
class InstanceNorm2dPlusNew(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False,
track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Sriram-Ravula/ncsnv2
|
InstanceNorm2dPlus
| false
| 2,862
|
[
"MIT"
] | 0
|
f610b59441a34063fae1c02aa06837b7eec95c03
|
https://github.com/Sriram-Ravula/ncsnv2/tree/f610b59441a34063fae1c02aa06837b7eec95c03
|
Cartesian
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
class Cartesian(nn.Module):
def forward(self, x):
r, phi = x[..., 0], x[..., 1]
return torch.stack((r * torch.cos(phi), r * torch.sin(phi)), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
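# Added usage sketch, not part of the original source: the last dimension
# holds polar coordinates (r, phi); the module returns the Cartesian pair
# (r*cos(phi), r*sin(phi)) stacked on the last dimension.
if __name__ == '__main__':
    import math
    polar = torch.tensor([[1.0, 0.0], [2.0, math.pi / 2]])
    print(Cartesian()(polar))  # approx [[1, 0], [0, 2]]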
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + 4 * x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CartesianNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Samuel-van-Gurp/fastMRI
|
Cartesian
| false
| 2,863
|
[
"MIT"
] | 0
|
0b1884a1c218961f81199144057ffcfde29a86ad
|
https://github.com/Samuel-van-Gurp/fastMRI/tree/0b1884a1c218961f81199144057ffcfde29a86ad
|
MeanPoolConv
|
import torch
import torch.nn as nn
def spectral_norm(layer, n_iters=1):
return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters)
class MeanPoolConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
spec_norm=False):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
self.conv = spectral_norm(self.conv)
def forward(self, inputs):
output = inputs
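        # The four strided slices pick the 2x2 neighbours of each output
        # pixel, so their average is exactly 2x2 mean pooling with stride 2.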
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return self.conv(output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
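# Added usage sketch, not part of the original source: the spatial dims are
# halved by the mean pooling before the size-preserving 3x3 convolution.
if __name__ == '__main__':
    layer = MeanPoolConv(input_dim=4, output_dim=8)
    y = layer(torch.rand(2, 4, 16, 16))
    assert y.shape == (2, 8, 8, 8)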
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0
def spectral_norm(layer, n_iters=1):
return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters)
class MeanPoolConvNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
spec_norm=False):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
if spec_norm:
self.conv = spectral_norm(self.conv)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sriram-Ravula/ncsnv2
|
MeanPoolConv
| false
| 2,864
|
[
"MIT"
] | 0
|
f610b59441a34063fae1c02aa06837b7eec95c03
|
https://github.com/Sriram-Ravula/ncsnv2/tree/f610b59441a34063fae1c02aa06837b7eec95c03
|
Accuracy
|
import torch
import torch.nn as nn
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
class Accuracy(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, target):
return accuracy(inputs, target, self.ignore_index)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
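# Added usage sketch, not part of the original source: positions whose label
# equals ignore_index are excluded from both numerator and denominator.
if __name__ == '__main__':
    logits = torch.tensor([[2.0, 1.0], [0.0, 3.0], [5.0, 0.0]])
    labels = torch.tensor([0, 1, -100])  # the last position is ignored
    print(Accuracy()(logits, labels))  # tensor(1.)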
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_per_fused__to_copy_div_eq_mul_ne_sum_1(in_ptr0, in_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + r2, None)
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp1 == tmp2
tmp4 = -100.0
tmp5 = tmp2 != tmp4
tmp6 = tmp3 & tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tmp5.to(tl.int64)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp10.to(tl.float32)
tmp16 = tmp14.to(tl.float32)
tmp17 = tmp15 / tmp16
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_div_eq_mul_ne_sum_1[grid(1)](buf0, arg1_1,
buf3, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
class AccuracyNew(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
StephanHeijl/tape
|
Accuracy
| false
| 2,865
|
[
"BSD-3-Clause"
] | 0
|
ec631ca53217686605477cf31af4fb8846ff660f
|
https://github.com/StephanHeijl/tape/tree/ec631ca53217686605477cf31af4fb8846ff660f
|
TemporalAttention
|
import torch
from torch import nn
class TemporalAttention(nn.Module):
"""
Temporal attention module from https://dl.acm.org/doi/abs/10.1145/3448083
"""
def __init__(self, hidden_dim):
super(TemporalAttention, self).__init__()
self.fc = nn.Linear(hidden_dim, 1)
self.sm = torch.nn.Softmax(dim=0)
def forward(self, x):
out = self.fc(x).squeeze(2)
weights_att = self.sm(out).unsqueeze(2)
context = torch.sum(weights_att * x, 0)
return context
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4}]
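# Added usage sketch, not part of the original source: dim 0 is treated as
# time. The fc scores each step, the softmax over time yields the attention
# weights, and the weighted sum collapses the time dimension.
if __name__ == '__main__':
    att = TemporalAttention(hidden_dim=4)
    x = torch.rand(10, 2, 4)  # (time, batch, hidden)
    assert att(x).shape == (2, 4)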
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp3 = tl.load(in_ptr0 + (16 + x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + x4, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(256)](buf3, primals_3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
return buf4, primals_3, buf1
class TemporalAttentionNew(nn.Module):
"""
Temporal attention module from https://dl.acm.org/doi/abs/10.1145/3448083
"""
def __init__(self, hidden_dim):
super(TemporalAttentionNew, self).__init__()
self.fc = nn.Linear(hidden_dim, 1)
self.sm = torch.nn.Softmax(dim=0)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
STRCSussex-UbiCompSiegen/dl_har_model
|
TemporalAttention
| false
| 2,866
|
[
"MIT"
] | 0
|
caac0f87fc7dd08a5d6ad3e4455ee25b35f5e7b4
|
https://github.com/STRCSussex-UbiCompSiegen/dl_har_model/tree/caac0f87fc7dd08a5d6ad3e4455ee25b35f5e7b4
|
LayerNorm
|
import torch
import torch.nn as nn
from torch.nn import Parameter
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.size(0) == 1:
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
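# Added usage sketch, not part of the original source: statistics are taken
# per sample over all remaining elements (with a special case for batch size
# 1), then the optional per-channel affine (gamma, beta) acts on dim 1.
if __name__ == '__main__':
    ln = LayerNorm(num_features=4)
    assert ln(torch.rand(2, 4, 8, 8)).shape == (2, 4, 8, 8)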
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp28 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-08
tmp25 = tmp23 + tmp24
tmp26 = tmp0 - tmp20
tmp27 = tmp26 / tmp25
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1,
1), 0), buf5
class LayerNormNew(nn.Module):
def __init__(self, num_features, eps=1e-08, affine=True):
super(LayerNormNew, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = Parameter(torch.Tensor(num_features).uniform_())
self.beta = Parameter(torch.zeros(num_features))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Sheroa/Video_Colorization
|
LayerNorm
| false
| 2,867
|
[
"MIT"
] | 0
|
5c772ac0ec944814cd8be0a94b0746116b11ac01
|
https://github.com/Sheroa/Video_Colorization/tree/5c772ac0ec944814cd8be0a94b0746116b11ac01
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import *
from copy import *
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
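# Added usage sketch, not part of the original source: the classic
# LeNet-style CIFAR-10 net. 32x32 RGB maps shrink 32 -> 28 -> 14 -> 10 -> 5
# through the conv/pool stages, which is where the 16 * 5 * 5 flatten size
# comes from.
if __name__ == '__main__':
    net = Net()
    assert net(torch.rand(4, 3, 32, 32)).shape == (4, 10)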
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import *
from copy import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Sup3Legacy/TIPE
|
Net
| false
| 2,868
|
[
"BSD-3-Clause"
] | 0
|
7e01cef869183c4d609c45d5fcf0bb371a9579f5
|
https://github.com/Sup3Legacy/TIPE/tree/7e01cef869183c4d609c45d5fcf0bb371a9579f5
|
ReidModel
|
import torch
import torch.nn as nn
class ReidModel(nn.Module):
def __init__(self, num_features_in, num_anchors=1, num_classes=80,
prior=0.01, feature_size=256):
super(ReidModel, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = self.output_act(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
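# Added usage sketch, not part of the original source: four size-preserving
# 3x3 conv+ReLU blocks feed a sigmoid head with num_anchors * num_classes
# output maps; note that `prior` is accepted but unused in this snippet.
if __name__ == '__main__':
    head = ReidModel(num_features_in=4)
    out = head(torch.rand(2, 4, 8, 8))
    assert out.shape == (2, 80, 8, 8)  # num_anchors=1, num_classes=80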
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 80
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (80, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (80,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 80, 4, 4), (1280, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_sigmoid_1[grid(5120)](buf9, primals_11,
5120, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7, buf9)
class ReidModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=1, num_classes=80,
prior=0.01, feature_size=256):
super(ReidModelNew, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
SajjadPSavoji/CTracker
|
ReidModel
| false
| 2,869
|
[
"MIT"
] | 0
|
f345925cccca13d045dea5d435ba3d463df7729a
|
https://github.com/SajjadPSavoji/CTracker/tree/f345925cccca13d045dea5d435ba3d463df7729a
|
ConcatConv2d
|
import torch
import torch.nn as nn
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
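        # Broadcast t into one extra feature map and concatenate it with x,
        # so the convolution can condition on time (a common neural-ODE trick).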
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
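# Added usage sketch, not part of the original source: a scalar t is tiled
# into one extra channel before the convolution. padding=1 below is an
# illustrative choice that keeps the spatial size with the default ksize=3.
if __name__ == '__main__':
    layer = ConcatConv2d(dim_in=4, dim_out=4, padding=1)
    x = torch.rand(2, 4, 8, 8)
    assert layer(torch.tensor(0.5), x).shape == (2, 4, 8, 8)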
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 5
x0 = xindex % 16
x2 = xindex // 80
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_3, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(320)](primals_2, primals_1, buf0, 320,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_3, buf0
class ConcatConv2dNew(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2dNew, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, input_0, input_1):
primals_3 = self._layer.weight
primals_4 = self._layer.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Teemo341/BDNN
|
ConcatConv2d
| false
| 2,870
|
[
"Apache-2.0"
] | 0
|
d53d4634a7a43d038faa049d7dfd10b3578ae267
|
https://github.com/Teemo341/BDNN/tree/d53d4634a7a43d038faa049d7dfd10b3578ae267
|
QNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class QNetwork(nn.Module):
""" Actor Policy (Q Network) model """
def __init__(self, state_size, action_size, seed, fc1_units=512,
fc2_units=256, fc3_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
            fcx_units (int): Dimension of the x-th hidden layer (x = 1, 2, 3)
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc1_units)
self.fc3 = nn.Linear(fc1_units, fc2_units)
self.fc4 = nn.Linear(fc2_units, fc3_units)
self.fc5 = nn.Linear(fc3_units, action_size)
def forward(self, state):
x = self.fc1(state)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
x = self.fc4(x)
x = F.relu(x)
action = self.fc5(x)
return action
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
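# Added usage sketch, not part of the original source: maps a state vector to
# one Q-value per action through four ReLU hidden layers (512-512-256-64).
if __name__ == '__main__':
    qnet = QNetwork(state_size=8, action_size=4, seed=0)
    q_values = qnet(torch.rand(32, 8))  # (batch, state_size)
    assert q_values.shape == (32, 4)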
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 512), (512, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (64, 256), (256, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (4, 64), (64, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1,
primals_2, buf12, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0),
reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf3,
primals_5, buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 512), (512, 1), 0),
reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf4
buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf5,
primals_7, buf10, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 64), (1, 256), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf6
buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_2[grid(4096)](buf7,
primals_9, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_10, (64, 4), (1, 64), 0
), alpha=1, beta=1, out=buf8)
del primals_11
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 512), (512, 1), 0
), reinterpret_tensor(buf3, (64, 512), (512, 1), 0
), reinterpret_tensor(buf5, (64, 256), (256, 1), 0
), reinterpret_tensor(buf7, (64, 64), (64, 1), 0
), primals_10, buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12
class QNetworkNew(nn.Module):
""" Actor Policy (Q Network) model """
def __init__(self, state_size, action_size, seed, fc1_units=512,
fc2_units=256, fc3_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
            fcx_units (int): Dimension of the x-th hidden layer (x = 1, 2, 3)
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(QNetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc1_units)
self.fc3 = nn.Linear(fc1_units, fc2_units)
self.fc4 = nn.Linear(fc2_units, fc3_units)
self.fc5 = nn.Linear(fc3_units, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Sushil-Thapa/ud-deep-reinforcement-learning
|
QNetwork
| false
| 2,871
|
[
"MIT"
] | 0
|
847873d771550aa7f502fa34e918d33ccc545608
|
https://github.com/Sushil-Thapa/ud-deep-reinforcement-learning/tree/847873d771550aa7f502fa34e918d33ccc545608
|
OutlookAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
class OutlookAttention(nn.Module):
"""
Implementation of outlook attention
--dim: hidden dim
--num_heads: number of heads
--kernel_size: kernel size in each window for outlook attention
return: token features after outlook attention
"""
def __init__(self, dim, num_heads, kernel_size=3, padding=1, stride=1,
qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
head_dim = dim // num_heads
self.num_heads = num_heads
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
self.scale = qk_scale or head_dim ** -0.5
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding,
stride=stride)
self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride,
ceil_mode=True)
def forward(self, x):
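        # Channel-last input: (B, H, W, C). Values come from self.v; attention
        # weights come from a pooled linear projection over each k*k window.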
B, H, W, C = x.shape
v = self.v(x).permute(0, 3, 1, 2)
h, w = math.ceil(H / self.stride), math.ceil(W / self.stride)
v = self.unfold(v).reshape(B, self.num_heads, C // self.num_heads,
self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2)
attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
attn = self.attn(attn).reshape(B, h * w, self.num_heads, self.
kernel_size * self.kernel_size, self.kernel_size * self.kernel_size
).permute(0, 2, 1, 3, 4)
attn = attn * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.
kernel_size * self.kernel_size, h * w)
x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size,
padding=self.padding, stride=self.stride)
x = self.proj(x.permute(0, 2, 3, 1))
x = self.proj_drop(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
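# Added usage sketch, not part of the original source: input and output are
# channel-last (B, H, W, C) maps, and the per-window attention weights come
# from a pooled linear projection instead of query-key dot products.
if __name__ == '__main__':
    attn = OutlookAttention(dim=8, num_heads=2)  # dim must divide by heads
    assert attn(torch.rand(2, 8, 8, 8)).shape == (2, 8, 8, 8)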
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_im2col_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0 + x1
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_mul_2(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 2304
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x7 = xindex
x0 = xindex % 36
x3 = xindex % 9
x4 = xindex // 9 % 4
x5 = xindex // 36 % 16
x6 = xindex // 576
tmp0 = tl.load(in_ptr0 + (r2 + 9 * x7), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r2 + 9 * x0), rmask & xmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask & xmask, tmp5, float('-inf'))
tmp8 = triton_helpers.max2(tmp7, 1)[:, None]
tmp9 = tmp4 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask & xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r2 + 9 * x3 + 81 * x5 + 1312 * x4 + 5248 * x6),
tmp15, rmask & xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x1 = xindex // 9 % 16
x2 = xindex // 144 % 4
x3 = xindex // 576
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x0 // 3) + x1 // 4), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (4 * (x0 % 3) + x1 % 4), xmask,
eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 6, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 6) | ~xmask,
'index out of bounds: 0 <= tmp4 < 6')
tmp7 = tmp6 + tmp1
tmp8 = tmp6 < 0
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.device_assert((0 <= tmp9) & (tmp9 < 6) | ~xmask,
'index out of bounds: 0 <= tmp9 < 6')
tmp11 = -1 + tmp4
tmp12 = tl.full([1], 0, tl.int64)
tmp13 = tmp11 >= tmp12
tmp14 = tl.full([1], 4, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = -1 + tmp9
tmp17 = tmp16 >= tmp12
tmp18 = tmp16 < tmp14
tmp19 = tmp13 & tmp15
tmp20 = tmp19 & tmp17
tmp21 = tmp20 & tmp18
tmp22 = tl.load(in_ptr1 + (-20 + x2 + 4 * tmp9 + 16 * tmp4 + 64 * x3),
tmp21 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + x5, tmp22, xmask)
@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20736
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 81
x1 = xindex // 81
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 81 * (x1 % 16) + 1312 * (x1 // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_col2im_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_col2im_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 576
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y5 = yindex // 3 % 12
x4 = xindex
y0 = yindex % 3
y1 = yindex // 3 % 4
y2 = yindex // 12 % 3
y3 = yindex // 36
tmp0 = tl.load(in_ptr0 + y5, ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (x4 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (y0 + 3 * y2 + 9 * x4 + 36 * y1 + 144 * y3 +
144 * ((y0 + 3 * y2) // 9)), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.full([XBLOCK, YBLOCK], 6, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 6) | ~ymask,
'index out of bounds: 0 <= tmp4 < 6')
tmp7 = tmp6 + tmp1
tmp8 = tmp6 < 0
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.device_assert((0 <= tmp9) & (tmp9 < 6) | ~(xmask & ymask),
'index out of bounds: 0 <= tmp9 < 6')
tl.atomic_add(out_ptr0 + tl.broadcast_to(tmp9 + 6 * tmp4 + 36 * y3, [
XBLOCK, YBLOCK]), tmp11, xmask & ymask, sem='relaxed')
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y1 = yindex // 4 % 4
y0 = yindex % 4
x3 = xindex
y2 = yindex // 16
y5 = yindex
tmp0 = 1 + y1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 6, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + y0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (7 + y0 + 6 * y1 + 36 * x3 + 144 * y2), tmp10 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x3 + 4 * y5), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (324, 4), (4, 1))
assert_size_stride(primals_4, (324,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((3, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_im2col_0[grid(12)](buf1, 12, XBLOCK=16, num_warps=
1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_avg_pool2d_1[grid(256)](primals_1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 324), (324, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 324), (1, 4), 0), out=buf3)
del primals_3
buf6 = empty_strided_cuda((4, 4, 16, 9, 9), (5248, 1312, 81, 9, 1),
torch.float32)
triton_per_fused__softmax_mul_2[grid(2304)](buf3, primals_4, buf6,
2304, 9, XBLOCK=8, num_warps=2, num_stages=1)
del primals_4
buf7 = empty_strided_cuda((4, 4, 16, 9, 1), (576, 144, 9, 1, 1),
torch.float32)
triton_poi_fused_clone_3[grid(2304)](buf1, buf0, buf7, 2304, XBLOCK
=256, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf3, (256, 9, 9), (81, 9, 1), 0)
del buf3
triton_poi_fused_bmm_4[grid(20736)](buf6, buf8, 20736, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((256, 9, 1), (9, 1, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf7, (256, 9, 1), (9,
1, 0), 0), out=buf9)
del buf8
buf10 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32
)
triton_poi_fused_col2im_5[grid(576)](buf10, 576, XBLOCK=256,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32
)
triton_poi_fused_col2im_5[grid(576)](buf11, 576, XBLOCK=256,
num_warps=4, num_stages=1)
triton_poi_fused_col2im_6[grid(576, 4)](buf1, buf9, buf11, 576, 4,
XBLOCK=1, YBLOCK=256, num_warps=4, num_stages=1)
del buf9
buf13 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_7[grid(64, 4)](buf11, buf13, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del buf11
buf14 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf14
triton_poi_fused_add_8[grid(256)](buf15, primals_6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_6
return buf15, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf6, buf10, reinterpret_tensor(buf13, (64, 4), (4, 1), 0
), primals_5, reinterpret_tensor(buf7, (256, 1, 9), (9, 1, 1), 0)
class OutlookAttentionNew(nn.Module):
"""
Implementation of outlook attention
--dim: hidden dim
--num_heads: number of heads
--kernel_size: kernel size in each window for outlook attention
return: token features after outlook attention
"""
def __init__(self, dim, num_heads, kernel_size=3, padding=1, stride=1,
qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
head_dim = dim // num_heads
self.num_heads = num_heads
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
self.scale = qk_scale or head_dim ** -0.5
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding,
stride=stride)
self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride,
ceil_mode=True)
def forward(self, input_0):
primals_2 = self.v.weight
primals_3 = self.attn.weight
primals_4 = self.attn.bias
primals_5 = self.proj.weight
primals_6 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
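A hedged sanity check, assuming a CUDA device (the call() path allocates CUDA buffers) and that the eager OutlookAttention above is importable next to this compiled class: after copying the weights, the two modules should agree up to floating-point tolerance (the atomic-add col2im kernel may reorder summation, hence the loose tolerances).

import torch
ref = OutlookAttention(dim=4, num_heads=4).cuda().eval()
opt = OutlookAttentionNew(dim=4, num_heads=4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # identical submodules, so keys match
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-4)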
|
QLSong/cv-classify
|
OutlookAttention
| false
| 2,872
|
[
"Apache-2.0"
] | 0
|
02f53d03868f299a08b5c97a266b50a7fdcd3f2b
|
https://github.com/QLSong/cv-classify/tree/02f53d03868f299a08b5c97a266b50a7fdcd3f2b
|
MLPBlock
|
import torch
from torch import nn
class MLPBlock(nn.Module):
def __init__(self, in_features, out_features, bias=True, layer_norm=
True, dropout=0.3, activation=nn.ReLU):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias)
self.activation = activation()
self.layer_norm = nn.LayerNorm(out_features) if layer_norm else None
self.dropout = nn.Dropout(dropout) if dropout else None
def forward(self, x):
x = self.activation(self.linear(x))
if self.layer_norm:
x = self.layer_norm(x)
if self.dropout:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
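A minimal usage sketch (illustrative): Linear -> ReLU -> LayerNorm -> Dropout over the last dimension; eval() makes the dropout a no-op so the output is deterministic.

import torch
block = MLPBlock(in_features=4, out_features=4).eval()
out = block(torch.rand(4, 4, 4, 4))  # LayerNorm normalises the trailing dim of 4
assert out.shape == (4, 4, 4, 4)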
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_relu_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_relu_0[grid(64)](buf0, buf1,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(256)](buf0, buf1,
buf2, primals_4, primals_5, buf3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class MLPBlockNew(nn.Module):
def __init__(self, in_features, out_features, bias=True, layer_norm=
True, dropout=0.3, activation=nn.ReLU):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias)
self.activation = activation()
self.layer_norm = nn.LayerNorm(out_features) if layer_norm else None
self.dropout = nn.Dropout(dropout) if dropout else None
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.layer_norm.weight
primals_5 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
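A hedged equivalence check, assuming a CUDA device and the eager MLPBlock above in scope: the fused call() contains no dropout kernel, so compare in eval mode where nn.Dropout is the identity.

import torch
ref = MLPBlock(4, 4).cuda().eval()
opt = MLPBlockNew(4, 4).cuda().eval()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-4)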
|
THinnerichs/deepgozero
|
MLPBlock
| false
| 2,873
|
[
"BSD-3-Clause"
] | 0
|
5f1481c41f879f7ec1b5eea22dcccdb8bf8825e2
|
https://github.com/THinnerichs/deepgozero/tree/5f1481c41f879f7ec1b5eea22dcccdb8bf8825e2
|
CRF
|
import torch
import torch.nn as nn
import torch.utils.data
class CRF(nn.Module):
"""Implements Conditional Random Fields"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood between features and tags.
        Essentially the difference between the individual sequence scores and
        the sum of all possible sequence scores (the partition function).
        Parameters:
        feats: Input features [batch size, sequence length, number of tags]
        tags: Target tag indices [batch size, sequence length]. Should be in
            the range [0, num_tags)
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
        tags: Target tag indices [batch size, sequence length]. Should be in
            the range [0, num_tags)
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
Computes the partition function for CRF using the forward algorithm.
        Basically, it calculates the scores of all possible tag sequences for
        the given feature vector sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_tags': 4}]
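A minimal usage sketch (illustrative only): forward() performs Viterbi decoding, while loss() scores gold tag sequences against the partition function.

import torch
crf = CRF(num_tags=4)
feats = torch.rand(4, 4, 4)            # [batch, seq_len, num_tags]
best_paths = crf(feats)                # Viterbi decode -> int64 [batch, seq_len]
gold = torch.randint(0, 4, (4, 4))     # tag indices in [0, num_tags)
nll = crf.loss(feats, gold)            # scalar negative log likelihood
nll.backward()                         # trains transitions and start/stop scores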
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp5, tmp11)
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp27 = tmp5 > tmp11
tmp28 = tmp5 == tmp11
tmp29 = tmp5 != tmp5
tmp30 = tmp11 != tmp11
tmp31 = tmp29 > tmp30
tmp32 = tmp27 | tmp31
tmp33 = tmp29 & tmp30
tmp34 = tmp28 | tmp33
tmp35 = tl.full([1], 0, tl.int64)
tmp36 = tl.full([1], 1, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tmp34 & tmp37
tmp39 = tmp32 | tmp38
tmp40 = tl.where(tmp39, tmp5, tmp11)
tmp41 = tl.where(tmp39, tmp35, tmp36)
tmp42 = tmp40 > tmp18
tmp43 = tmp40 == tmp18
tmp44 = tmp40 != tmp40
tmp45 = tmp18 != tmp18
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp41 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp40, tmp18)
tmp55 = tl.where(tmp53, tmp41, tmp50)
tmp56 = tmp54 > tmp25
tmp57 = tmp54 == tmp25
tmp58 = tmp54 != tmp54
tmp59 = tmp25 != tmp25
tmp60 = tmp58 > tmp59
tmp61 = tmp56 | tmp60
tmp62 = tmp58 & tmp59
tmp63 = tmp57 | tmp62
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp55 < tmp64
tmp66 = tmp63 & tmp65
tmp67 = tmp61 | tmp66
tl.where(tmp67, tmp54, tmp25)
tmp69 = tl.where(tmp67, tmp55, tmp64)
tl.store(out_ptr0 + x2, tmp26, xmask)
tl.store(out_ptr1 + x2, tmp69, xmask)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr2 + 2)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp47 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp48 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr2 + 3)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp8 + tmp10
tmp12 = tmp5 > tmp11
tmp13 = tmp5 == tmp11
tmp14 = tmp5 != tmp5
tmp15 = tmp11 != tmp11
tmp16 = tmp14 > tmp15
tmp17 = tmp12 | tmp16
tmp18 = tmp14 & tmp15
tmp19 = tmp13 | tmp18
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 1, tl.int64)
tmp22 = tmp20 < tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 | tmp23
tmp25 = tl.where(tmp24, tmp5, tmp11)
tmp26 = tl.where(tmp24, tmp20, tmp21)
tmp29 = tmp27 + tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp25 > tmp32
tmp34 = tmp25 == tmp32
tmp35 = tmp25 != tmp25
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 2, tl.int64)
tmp42 = tmp26 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp25, tmp32)
tmp46 = tl.where(tmp44, tmp26, tmp41)
tmp49 = tmp47 + tmp48
tmp52 = tmp49 + tmp51
tmp53 = tmp45 > tmp52
tmp54 = tmp45 == tmp52
tmp55 = tmp45 != tmp45
tmp56 = tmp52 != tmp52
tmp57 = tmp55 > tmp56
tmp58 = tmp53 | tmp57
tmp59 = tmp55 & tmp56
tmp60 = tmp54 | tmp59
tmp61 = tl.full([1], 3, tl.int64)
tmp62 = tmp46 < tmp61
tmp63 = tmp60 & tmp62
tmp64 = tmp58 | tmp63
tl.where(tmp64, tmp45, tmp52)
tmp66 = tl.where(tmp64, tmp46, tmp61)
tmp67 = tl.full([XBLOCK], 4, tl.int32)
tmp68 = tmp66 + tmp67
tmp69 = tmp66 < 0
tmp70 = tl.where(tmp69, tmp68, tmp66)
tl.device_assert((0 <= tmp70) & (tmp70 < 4) | ~xmask,
'index out of bounds: 0 <= tmp70 < 4')
tmp72 = tl.load(in_ptr3 + (tmp70 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp73 = tmp72 + tmp67
tmp74 = tmp72 < 0
tmp75 = tl.where(tmp74, tmp73, tmp72)
tl.device_assert((0 <= tmp75) & (tmp75 < 4) | ~xmask,
'index out of bounds: 0 <= tmp75 < 4')
tmp77 = tl.load(in_ptr4 + (tmp75 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp78 = tmp77 + tmp67
tmp79 = tmp77 < 0
tmp80 = tl.where(tmp79, tmp78, tmp77)
tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask,
'index out of bounds: 0 <= tmp80 < 4')
tmp82 = tl.load(in_ptr5 + (tmp80 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp66, xmask)
tl.store(out_ptr1 + 4 * x0, tmp82, xmask)
tl.store(out_ptr2 + 4 * x0, tmp77, xmask)
tl.store(out_ptr3 + 4 * x0, tmp72, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf0, arg0_1, arg2_1, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = buf0
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf2, arg0_1, arg2_1, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg2_1
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3)
buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0)
buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1)
buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf4, arg0_1, arg3_1,
buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg0_1
del arg3_1
del buf1
del buf3
del buf4
del buf5
return buf10,
class CRFNew(nn.Module):
"""Implements Conditional Random Fields"""
def __init__(self, num_tags):
super(CRFNew, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood between features and tags.
        Essentially the difference between the individual sequence scores and
        the sum of all possible sequence scores (the partition function).
        Parameters:
        feats: Input features [batch size, sequence length, number of tags]
        tags: Target tag indices [batch size, sequence length]. Should be in
            the range [0, num_tags)
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
        tags: Target tag indices [batch size, sequence length]. Should be in
            the range [0, num_tags)
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
Computes the partition function for CRF using the forward algorithm.
        Basically, it calculates the scores of all possible tag sequences for
        the given feature vector sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
def forward(self, input_0):
arg2_1 = self.transitions
arg1_1 = self.start_transitions
arg3_1 = self.stop_transitions
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
Syhen/vtou-ner
|
CRF
| false
| 2,874
|
[
"MIT"
] | 0
|
708eb3d475fbce91949a7ca3b0bf2631c4feba62
|
https://github.com/Syhen/vtou-ner/tree/708eb3d475fbce91949a7ca3b0bf2631c4feba62
|
QuantConv1d
|
import torch
from torch import nn
class QuantConv1d(nn.Module):
"""Quantized 1D Conv"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
**kwargs):
super().__init__()
self.qconv1d = nn.Conv1d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias, padding_mode=padding_mode, **kwargs)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.qconv1d(x)
x = self.dequant(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
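A minimal usage sketch: until the module is prepared and converted with torch.quantization, QuantStub and DeQuantStub act as identities, so this runs in plain fp32; note that get_inputs uses an unbatched (C_in, L) input, which nn.Conv1d accepts on recent PyTorch versions.

import torch
conv = QuantConv1d(in_channels=4, out_channels=4, kernel_size=4)
y = conv(torch.rand(4, 4))  # unbatched (C_in=4, L=4) -> (C_out=4, L_out=1)
assert y.shape == (4, 1)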
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 1), (4, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(4)](buf1, primals_3, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf1, (4, 1), (1, 1), 0
), primals_2, reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0)
class QuantConv1dNew(nn.Module):
"""Quantized 1D Conv"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
**kwargs):
super().__init__()
self.qconv1d = nn.Conv1d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=
groups, bias=bias, padding_mode=padding_mode, **kwargs)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, input_0):
primals_2 = self.qconv1d.weight
primals_3 = self.qconv1d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
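A hedged observation on the fused path, assuming a CUDA device: the quant/dequant stubs were traced away as identities, so call() reduces to an extern convolution plus a small bias-add kernel.

import torch
conv = QuantConv1dNew(4, 4, 4).cuda()
y = conv(torch.rand(4, 4, device='cuda'))
assert y.shape == (4, 1)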
|
TeaPoly/wenet
|
QuantConv1d
| false
| 2,875
|
[
"Apache-2.0"
] | 0
|
5681887e338e4c8b2c75ffc283140e11a9d56a6d
|
https://github.com/TeaPoly/wenet/tree/5681887e338e4c8b2c75ffc283140e11a9d56a6d
|
GumbelSoftmaxLayer
|
import torch
import torch.nn as nn
from torch.distributions import RelaxedOneHotCategorical
import torch.nn.parallel
import torch.utils.data
import torch.distributions
def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0,
training: 'bool'=True, straight_through: 'bool'=False):
size = logits.size()
if not training:
indexes = logits.argmax(dim=-1)
one_hot = torch.zeros_like(logits).view(-1, size[-1])
one_hot.scatter_(1, indexes.view(-1, 1), 1)
one_hot = one_hot.view(*size)
return one_hot
sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature
).rsample()
if straight_through:
size = sample.size()
indexes = sample.argmax(dim=-1)
hard_sample = torch.zeros_like(sample).view(-1, size[-1])
hard_sample.scatter_(1, indexes.view(-1, 1), 1)
hard_sample = hard_sample.view(*size)
sample = sample + (hard_sample - sample).detach()
return sample
class GumbelSoftmaxLayer(nn.Module):
def __init__(self, temperature: 'float'=1.0, trainable_temperature:
'bool'=False, straight_through: 'bool'=False):
super(GumbelSoftmaxLayer, self).__init__()
self.straight_through = straight_through
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature
]), requires_grad=True)
def forward(self, logits: 'torch.Tensor'):
return gumbel_softmax_sample(logits, self.temperature, self.
training, self.straight_through)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
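A minimal usage sketch: in training mode the layer draws a relaxed (soft) sample; in eval mode it returns a deterministic hard one-hot over the last dimension.

import torch
layer = GumbelSoftmaxLayer(temperature=1.0)
logits = torch.rand(4, 4, 4, 4)
soft = layer(logits)                      # stochastic relaxed sample
layer.eval()
hard = layer(logits)                      # argmax one-hot, no sampling
assert torch.all(hard.sum(dim=-1) == 1)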
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.distributions import RelaxedOneHotCategorical
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_scatter_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(in_out_ptr0 + x4, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_scatter_1[grid(256)](buf2, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
return buf2,
def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0,
training: 'bool'=True, straight_through: 'bool'=False):
size = logits.size()
if not training:
indexes = logits.argmax(dim=-1)
one_hot = torch.zeros_like(logits).view(-1, size[-1])
one_hot.scatter_(1, indexes.view(-1, 1), 1)
one_hot = one_hot.view(*size)
return one_hot
sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature
).rsample()
if straight_through:
size = sample.size()
indexes = sample.argmax(dim=-1)
hard_sample = torch.zeros_like(sample).view(-1, size[-1])
hard_sample.scatter_(1, indexes.view(-1, 1), 1)
hard_sample = hard_sample.view(*size)
sample = sample + (hard_sample - sample).detach()
return sample
class GumbelSoftmaxLayerNew(nn.Module):
def __init__(self, temperature: 'float'=1.0, trainable_temperature:
'bool'=False, straight_through: 'bool'=False):
super(GumbelSoftmaxLayerNew, self).__init__()
self.straight_through = straight_through
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature
]), requires_grad=True)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
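A hedged note, assuming a CUDA device: the fused call() implements only the argmax-plus-scatter branch, so GumbelSoftmaxLayerNew always returns hard one-hot samples regardless of self.training.

import torch
layer = GumbelSoftmaxLayerNew().cuda()
out = layer(torch.rand(4, 4, 4, 4, device='cuda'))
assert torch.all(out.sum(dim=-1) == 1)  # one-hot over the last dimension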
|
Shawn-Guo-CN/EGG
|
GumbelSoftmaxLayer
| false
| 2,876
|
[
"MIT"
] | 0
|
0a5b258108e2cd1c873d7f67e8c92551bb3d809c
|
https://github.com/Shawn-Guo-CN/EGG/tree/0a5b258108e2cd1c873d7f67e8c92551bb3d809c
|
RegressionModel
|
import torch
import torch.nn as nn
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchors=1, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 8, kernel_size=
3, padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 8)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
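A minimal usage sketch: four 3x3 conv+ReLU stages followed by an output head, with the result flattened so each spatial location (times num_anchors) yields one row of 8 regression values.

import torch
model = RegressionModel(num_features_in=4)
out = model(torch.rand(4, 4, 4, 4))  # (B, C_in, H, W) -> (B, H*W*num_anchors, 8)
assert out.shape == (4, 16, 8)       # 4x4 grid, one anchor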
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 8 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (8, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 8, 4, 4), (128, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_clone_1[grid(64, 8)](buf8, primals_11, buf9, 64, 8,
XBLOCK=8, YBLOCK=64, num_warps=4, num_stages=1)
del buf8
del primals_11
return (reinterpret_tensor(buf9, (4, 16, 8), (128, 8, 1), 0), primals_1,
primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3,
buf5, buf7)
class RegressionModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=1, feature_size=256):
super(RegressionModelNew, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 8, kernel_size=
3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
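A hedged equivalence check, assuming a CUDA device and the eager RegressionModel above in scope: with shared weights, the fused tower (extern convolutions plus fused bias+ReLU kernels) should match the eager model.

import torch
ref = RegressionModel(num_features_in=4).cuda()
opt = RegressionModelNew(num_features_in=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-4)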
|
SajjadPSavoji/CTracker
|
RegressionModel
| false
| 2,877
|
[
"MIT"
] | 0
|
f345925cccca13d045dea5d435ba3d463df7729a
|
https://github.com/SajjadPSavoji/CTracker/tree/f345925cccca13d045dea5d435ba3d463df7729a
|
SimpleAttention
|
import torch
import torch.nn.functional as F
from torch import nn
class SimpleAttention(nn.Module):
def __init__(self, n_features, n_hidden, key=False, copy=False, query=
True, memory=False):
super().__init__()
self.key = key
self.query = query
self.memory = memory
self.n_features = n_features
self.n_hidden = n_hidden
self.copy = copy
if self.copy:
assert self.query
if self.key:
self.make_key = nn.Linear(n_features, n_hidden)
if self.query:
self.make_query = nn.Linear(n_features, (1 + copy) * n_hidden)
if self.memory:
self.make_memory = nn.Linear(n_features, n_hidden)
self.n_out = n_hidden
def forward(self, features, hidden, mask=None):
if self.key:
key = self.make_key(features)
else:
key = features
if self.memory:
memory = self.make_memory(features)
else:
memory = features
if self.query:
query = self.make_query(hidden)
else:
query = hidden
if self.copy:
query = query.view(1, -1, query.shape[-1] // 2, 2)
key = key.unsqueeze(-1)
mask = mask.unsqueeze(-1)
elif len(query.shape) < 3:
query = query.unsqueeze(0)
scores = (key * query).sum(dim=2)
if mask is not None:
scores += mask * -99999
if self.copy:
scores, copy_scores = torch.chunk(scores, 2, dim=-1)
copy_distribution = F.softmax(copy_scores.squeeze(-1), dim=0)
distribution = F.softmax(scores.squeeze(-1), dim=0)
else:
distribution = F.softmax(scores, dim=0)
copy_distribution = distribution
weighted = memory * distribution.unsqueeze(2).expand_as(memory)
summary = weighted.sum(dim=0, keepdim=True)
return summary, distribution, copy_distribution
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'n_hidden': 4}]
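A minimal usage sketch matching get_inputs: with the key/memory projections disabled, the features serve as both keys and memory, only the query is projected, and attention is normalised over dim 0.

import torch
attn = SimpleAttention(n_features=4, n_hidden=4)
features = torch.rand(4, 4, 4, 4)
hidden = torch.rand(4, 4, 4, 4)
summary, dist, copy_dist = attn(features, hidden)
assert summary.shape == (1, 4, 4, 4)  # weighted sum over dim 0, keepdim=True
assert dist is copy_dist              # no copy head, so the two coincide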
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tmp7 = tmp6 + tmp2
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp11 + tmp2
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp16 + tmp2
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](primals_1, buf0, primals_3,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (1, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_mul_sum_3[grid(64)](primals_1, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf4, buf3, primals_1, reinterpret_tensor(primals_4, (64, 4), (4,
1), 0), buf3
class SimpleAttentionNew(nn.Module):
def __init__(self, n_features, n_hidden, key=False, copy=False, query=
True, memory=False):
super().__init__()
self.key = key
self.query = query
self.memory = memory
self.n_features = n_features
self.n_hidden = n_hidden
self.copy = copy
if self.copy:
assert self.query
if self.key:
self.make_key = nn.Linear(n_features, n_hidden)
if self.query:
self.make_query = nn.Linear(n_features, (1 + copy) * n_hidden)
if self.memory:
self.make_memory = nn.Linear(n_features, n_hidden)
self.n_out = n_hidden
def forward(self, input_0, input_1):
primals_2 = self.make_query.weight
primals_3 = self.make_query.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1], output[2]
|
TahaBinhuraib/lexical | SimpleAttention | false | 2,878 | ["MIT"] | 0 | 0af02590829755f9ae2268fed76ea4b6d38e9b61 | https://github.com/TahaBinhuraib/lexical/tree/0af02590829755f9ae2268fed76ea4b6d38e9b61
|
ResBlock
|
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
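# Minimal usage sketch (illustrative, not part of the original repo): with
# stride=1 and no downsample module, every op preserves (N, C, H, W), so the
# residual addition `out + shortcut` is well defined.
if __name__ == "__main__":
    block = ResBlock(inplanes=4, planes=4)
    x = torch.rand(4, 4, 4, 4)
    out = block(x)
    assert out.shape == x.shape  # conv3x3 uses padding=1, so H and W are kept
    print(out.shape)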
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](primals_1,
primals_2, primals_3, buf0, buf3, buf12, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_2
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf4, primals_5,
primals_6, buf5, buf9, buf8, 16, 16, XBLOCK=8, num_warps=2,
num_stages=1)
del primals_6
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_add_1[grid(256)](buf11, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4,
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(
buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1),
(4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0))
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlockNew, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, input_0):
primals_2 = self.norm1.weight
primals_3 = self.norm1.bias
primals_4 = self.conv1.weight
primals_5 = self.norm2.weight
primals_6 = self.norm2.bias
primals_7 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Teemo341/BDNN | ResBlock | false | 2,880 | ["Apache-2.0"] | 0 | d53d4634a7a43d038faa049d7dfd10b3578ae267 | https://github.com/Teemo341/BDNN/tree/d53d4634a7a43d038faa049d7dfd10b3578ae267
|
RadialBesselLayer
|
import torch
import numpy as np
from torch import nn
class RadialBesselLayer(nn.Module):
"""Radial Bessel functions based on the work by DimeNet: https://github.com/klicperajo/dimenet
Args:
        n_radial (int, optional): total number of radial functions, :math:`N_g`.
cutoff (float, optional): cutoff, :math:`\\mu_{r_c}`
"""
def __init__(self, n_radial=16, cutoff=5.0, device=None):
super(RadialBesselLayer, self).__init__()
self.inv_cutoff = 1 / cutoff
self.frequencies = nn.Parameter(torch.tensor(np.arange(1, n_radial +
1) * np.pi, device=device), requires_grad=False)
def forward(self, distances):
"""Compute smeared-gaussian distance values.
Args:
distances (torch.Tensor): interatomic distance values of
(N_b x N_at x N_nbh) shape.
Returns:
torch.Tensor: layer output of (N_b x N_at x N_nbh x N_g) shape.
"""
d_scaled = distances * self.inv_cutoff
d_scaled = d_scaled.unsqueeze(-1)
out = torch.sin(self.frequencies * d_scaled)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
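# Worked example (illustrative addition): each distance d expands to
# sin(n * pi * d / cutoff) for n = 1..n_radial along a new trailing dim.
if __name__ == "__main__":
    layer = RadialBesselLayer(n_radial=16, cutoff=5.0)
    d = torch.rand(4, 4, 4, 4)
    out = layer(d)
    print(out.shape)  # torch.Size([4, 4, 4, 4, 16])
    # The n=1 basis function equals sin(pi * d / cutoff); out is float64
    # because the frequencies are built from a float64 numpy array.
    expected = torch.sin(np.pi * d / 5.0)
    print(torch.allclose(out[..., 0].float(), expected, atol=1e-6))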
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = 0.2
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.float64)
tmp5 = tmp0 * tmp4
tmp6 = libdevice.sin(tmp5)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 16), (1024, 256, 64, 16, 1),
torch.float64)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(4096)](arg1_1, arg0_1, buf0, 4096,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class RadialBesselLayerNew(nn.Module):
"""Radial Bessel functions based on the work by DimeNet: https://github.com/klicperajo/dimenet
Args:
        n_radial (int, optional): total number of radial functions, :math:`N_g`.
cutoff (float, optional): cutoff, :math:`\\mu_{r_c}`
"""
def __init__(self, n_radial=16, cutoff=5.0, device=None):
super(RadialBesselLayerNew, self).__init__()
self.inv_cutoff = 1 / cutoff
self.frequencies = nn.Parameter(torch.tensor(np.arange(1, n_radial +
1) * np.pi, device=device), requires_grad=False)
def forward(self, input_0):
arg1_1 = self.frequencies
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
THGLab/NewtonNet | RadialBesselLayer | false | 2,881 | ["MIT"] | 0 | fcf2af848a1c998bd08096dcefb58a5610eda03c | https://github.com/THGLab/NewtonNet/tree/fcf2af848a1c998bd08096dcefb58a5610eda03c
|
InformedSender
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class InformedSender(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSender, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(
hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def forward(self, x, return_embeddings=False):
emb = self.return_embeddings(x)
h = self.conv2(emb)
h = torch.sigmoid(h)
h = h.transpose(1, 2)
h = self.conv3(h)
h = torch.sigmoid(h)
h = h.squeeze(dim=1)
h = h.squeeze(dim=1)
h = self.lin4(h)
h = h.mul(1.0 / self.temp)
logits = F.log_softmax(h, dim=1)
return logits
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'game_size': 4, 'feat_size': 4, 'embedding_size': 4,
'hidden_size': 4}]
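# Minimal usage sketch (illustrative addition): the sender embeds each of the
# game_size candidates (indexed as x[i], i.e. shape (game_size, batch,
# feat_size)) and returns per-batch log-probabilities over the vocabulary.
if __name__ == "__main__":
    sender = InformedSender(game_size=4, feat_size=4, embedding_size=4,
        hidden_size=4)
    x, = get_inputs()
    logits = sender(x)
    print(logits.shape)             # torch.Size([4, 100])
    print(logits.exp().sum(dim=1))  # each row sums to 1 (log_softmax output)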
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask, eviction_policy
='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 100
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(rmask & xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl_math.log(tmp13)
tmp15 = tmp8 - tmp14
tl.store(out_ptr2 + (r1 + 100 * x0), tmp15, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_4, (1, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_5, (100, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, buf1, buf2, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del buf3
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(4, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 4), (16, 4, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_sigmoid_1[grid(64)](buf6, 64, XBLOCK=64, num_warps
=1, num_stages=1)
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 1, 4,
4), (16, 4, 4, 1), 0), primals_4, stride=(4, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 4), (4, 4, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(16)](buf8, 16, XBLOCK=16, num_warps
=1, num_stages=1)
buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 100), (1, 4), 0), out=buf9)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(4)](buf9, buf12, 4, 100,
XBLOCK=1, num_warps=2, num_stages=1)
del buf9
return buf12, primals_3, primals_4, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48
), buf4, buf6, buf8, buf12, primals_5
class InformedSenderNew(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSenderNew, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(
hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def forward(self, input_0):
primals_2 = self.lin1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.lin4.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Shawn-Guo-CN/EGG | InformedSender | false | 2,882 | ["MIT"] | 0 | 0a5b258108e2cd1c873d7f67e8c92551bb3d809c | https://github.com/Shawn-Guo-CN/EGG/tree/0a5b258108e2cd1c873d7f67e8c92551bb3d809c
|
GumbelSoftMax
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
from itertools import product as product
class _GumbelSoftMax(torch.autograd.Function):
"""
implementing the MixedOp, but carried out in a different way as DARTS
DARTS adds all operations together, then select the maximal one to construct the final network,
however, during the late process, more weights are assigned to the None, this is unreasonable under the
circumstance that per operation has the unsure number of inputs.
Thus, we modifies the original DARTS by applying way in GDAS to test.
This class aims to compute the gradients by ourself.
"""
@staticmethod
def forward(ctx, weights):
weights_norm = F.softmax(weights, dim=-1)
ctx.saved_for_backward = weights_norm
mask = torch.zeros_like(weights_norm)
_, idx = weights_norm.topk(dim=-1, k=1, largest=True)
mask[idx] = 1.0
return mask
@staticmethod
def backward(ctx, grad_output):
gumbel_norm = ctx.saved_for_backward
return gumbel_norm * (1 - gumbel_norm
) * grad_output * gumbel_norm.shape[0]
class GumbelSoftMax(nn.Module):
def __init__(self):
super(GumbelSoftMax, self).__init__()
def forward(self, weights, temp_coeff=1.0):
gumbel = -0.001 * torch.log(-torch.log(torch.rand_like(weights)))
weights = _GumbelSoftMax.apply((weights + gumbel) / temp_coeff)
return weights
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
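# Minimal usage sketch (illustrative addition): the forward pass perturbs the
# weights with scaled Gumbel noise and emits a hard 0/1 mask, while the custom
# backward of _GumbelSoftMax routes a smoothed softmax-style gradient through.
if __name__ == "__main__":
    gs = GumbelSoftMax()
    w = torch.rand(4, 4, 4, 4, requires_grad=True)
    mask = gs(w)
    mask.sum().backward()  # gradient flows via the straight-through backward
    print(mask.shape, w.grad.shape)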
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_add_log_mul_neg_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tl_math.log(tmp1)
tmp3 = -tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = -0.001
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp12 = tl_math.log(tmp11)
tmp13 = -tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp14 * tmp5
tmp16 = tmp10 + tmp15
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp9, tmp17)
tmp21 = tl_math.log(tmp20)
tmp22 = -tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp23 * tmp5
tmp25 = tmp19 + tmp24
tmp26 = tmp25 * tmp8
tmp27 = triton_helpers.maximum(tmp18, tmp26)
tmp30 = tl_math.log(tmp29)
tmp31 = -tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 * tmp5
tmp34 = tmp28 + tmp33
tmp35 = tmp34 * tmp8
tmp36 = triton_helpers.maximum(tmp27, tmp35)
tmp37 = tmp9 - tmp36
tmp38 = tmp37 * tmp8
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp17 - tmp36
tmp41 = tmp40 * tmp8
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp39 + tmp42
tmp44 = tmp26 - tmp36
tmp45 = tmp44 * tmp8
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp43 + tmp46
tmp48 = tmp35 - tmp36
tmp49 = tmp48 * tmp8
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp47 + tmp50
tl.store(out_ptr0 + x0, tmp36, xmask)
tl.store(out_ptr1 + x0, tmp51, xmask)
@triton.jit
def triton_poi_fused__softmax_add_log_mul_neg_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp10 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl_math.log(tmp1)
tmp3 = -tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = -0.001
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp8
tmp13 = tl_math.exp(tmp12)
tmp15 = tmp13 / tmp14
tl.store(in_out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_zeros_like_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_zeros_like_3(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = 1.0
tl.store(out_ptr0 + (x0 + 64 * tmp4), tmp6, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_log_mul_neg_0[grid(64)](arg0_1, buf1,
buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused__softmax_add_log_mul_neg_1[grid(256)](buf4, arg0_1,
buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf2
del buf3
buf5 = torch.ops.aten.topk.default(buf4, 1)
buf7 = buf5[1]
del buf5
buf8 = buf4
del buf4
triton_poi_fused_zeros_like_2[grid(256)](buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
triton_poi_fused_index_put_lift_fresh_zeros_like_3[grid(4096)](buf7,
buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
return buf8,
class _GumbelSoftMax(torch.autograd.Function):
"""
implementing the MixedOp, but carried out in a different way as DARTS
DARTS adds all operations together, then select the maximal one to construct the final network,
however, during the late process, more weights are assigned to the None, this is unreasonable under the
circumstance that per operation has the unsure number of inputs.
Thus, we modifies the original DARTS by applying way in GDAS to test.
This class aims to compute the gradients by ourself.
"""
@staticmethod
def forward(ctx, weights):
weights_norm = F.softmax(weights, dim=-1)
ctx.saved_for_backward = weights_norm
mask = torch.zeros_like(weights_norm)
_, idx = weights_norm.topk(dim=-1, k=1, largest=True)
mask[idx] = 1.0
return mask
@staticmethod
def backward(ctx, grad_output):
gumbel_norm = ctx.saved_for_backward
return gumbel_norm * (1 - gumbel_norm
) * grad_output * gumbel_norm.shape[0]
class GumbelSoftMaxNew(nn.Module):
def __init__(self):
super(GumbelSoftMaxNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
TayaPenskaya/hair_classifier | GumbelSoftMax | false | 2,883 | ["MIT"] | 0 | 669f42b705a4dd0bdde9d330db5214a3213db5a5 | https://github.com/TayaPenskaya/hair_classifier/tree/669f42b705a4dd0bdde9d330db5214a3213db5a5
|
PositionWiseFFN
|
import torch
import torch.nn as nn
from random import *
from torch.nn.functional import relu
class PositionWiseFFN(nn.Module):
def __init__(self, model_dim, dropout=0.0):
super().__init__()
dff = model_dim * 4
self.l = nn.Linear(model_dim, dff)
self.o = nn.Linear(dff, model_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(model_dim)
def forward(self, x):
o = relu(self.l(x))
o = self.o(o)
o = self.dropout(o)
o = self.layer_norm(x + o)
return o
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4}]
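# Minimal usage sketch (illustrative addition): the FFN expands model_dim ->
# 4*model_dim -> model_dim, adds the residual, and LayerNorm-normalizes the
# last dim.
if __name__ == "__main__":
    ffn = PositionWiseFFN(model_dim=4)
    x = torch.rand(4, 4, 4, 4)
    out = ffn(x)
    print(out.shape)                     # unchanged: torch.Size([4, 4, 4, 4])
    print(out.mean(dim=-1).abs().max())  # ~0 with default affine parameters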
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from random import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf2,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](primals_3, buf2,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 16), (
16, 1), 0), buf2, primals_4, buf6
class PositionWiseFFNNew(nn.Module):
def __init__(self, model_dim, dropout=0.0):
super().__init__()
dff = model_dim * 4
self.l = nn.Linear(model_dim, dff)
self.o = nn.Linear(dff, model_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(model_dim)
def forward(self, input_0):
primals_1 = self.l.weight
primals_2 = self.l.bias
primals_4 = self.o.weight
primals_5 = self.o.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Tensor-Hu/ExampleModel | PositionWiseFFN | false | 2,884 | ["MIT"] | 0 | fbbedc7e50b34972fe99560305790bea3341427b | https://github.com/Tensor-Hu/ExampleModel/tree/fbbedc7e50b34972fe99560305790bea3341427b
|
ReinforcedReceiver
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from torch.distributions import Bernoulli
import torch.distributions
class ReinforcedReceiver(nn.Module):
def __init__(self, n_bits, n_hidden):
super(ReinforcedReceiver, self).__init__()
self.emb_column = nn.Linear(n_bits, n_hidden)
self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden)
self.fc2 = nn.Linear(2 * n_hidden, n_bits)
def forward(self, embedded_message, bits):
embedded_bits = self.emb_column(bits.float())
x = torch.cat([embedded_bits, embedded_message], dim=1)
x = self.fc1(x)
x = F.leaky_relu(x)
x = self.fc2(x)
probs = x.sigmoid()
distr = Bernoulli(probs=probs)
entropy = distr.entropy()
if self.training:
sample = distr.sample()
else:
sample = (probs > 0.5).float()
log_prob = distr.log_prob(sample).sum(dim=1)
return sample, log_prob, entropy
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_bits': 4, 'n_hidden': 4}]
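# Minimal usage sketch (illustrative addition): in eval mode the receiver
# thresholds the per-bit probabilities at 0.5 instead of sampling, so the
# output is deterministic; log_prob is summed over bits (REINFORCE-style).
if __name__ == "__main__":
    receiver = ReinforcedReceiver(n_bits=4, n_hidden=4)
    receiver.eval()
    message, bits = get_inputs()
    sample, log_prob, entropy = receiver(message, bits)
    print(sample.shape, log_prob.shape, entropy.shape)  # (4, 4), (4,), (4, 4)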
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 8), (8, 1))
assert_size_stride(primals_6, (8,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_4, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 8), (1, 8
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(32)](buf3, primals_6, buf4, buf5,
32, XBLOCK=32, num_warps=1, num_stages=1)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8, 4), (1, 8
), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_2[grid(16)](buf7, primals_8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
return buf7, buf7, primals_1, buf2, buf4, buf5, buf7, primals_7, primals_5
class ReinforcedReceiverNew(nn.Module):
def __init__(self, n_bits, n_hidden):
super(ReinforcedReceiverNew, self).__init__()
self.emb_column = nn.Linear(n_bits, n_hidden)
self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden)
self.fc2 = nn.Linear(2 * n_hidden, n_bits)
def forward(self, input_0, input_1):
primals_1 = self.emb_column.weight
primals_3 = self.emb_column.bias
primals_5 = self.fc1.weight
primals_6 = self.fc1.bias
primals_7 = self.fc2.weight
primals_8 = self.fc2.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
|
Shawn-Guo-CN/EGG | ReinforcedReceiver | false | 2,885 | ["MIT"] | 0 | 0a5b258108e2cd1c873d7f67e8c92551bb3d809c | https://github.com/Shawn-Guo-CN/EGG/tree/0a5b258108e2cd1c873d7f67e8c92551bb3d809c
|
QuantPointwiseConv
|
import torch
from torch import nn
class QuantPointwiseConv(nn.Module):
"""Quantized Point-wise Conv"""
def __init__(self, in_channels, out_channels, bias=True, **kwargs):
super().__init__()
self.qlinear = nn.Linear(in_channels, out_channels, bias=bias)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = x.transpose(1, 2)
x = self.qlinear(x)
x = x.transpose(1, 2)
x = self.dequant(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
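# Minimal usage sketch (illustrative addition): before quantization
# conversion the Quant/DeQuant stubs are identity ops, so this reduces to a
# Linear applied along the last dim with dims 1 and 2 swapped before and
# after; for the canonical (batch, channels, time) input that is exactly a
# point-wise convolution over channels.
if __name__ == "__main__":
    pwconv = QuantPointwiseConv(in_channels=4, out_channels=4)
    x, = get_inputs()
    out = pwconv(x)
    print(out.shape)  # torch.Size([4, 4, 4, 4])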
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class QuantPointwiseConvNew(nn.Module):
"""Quantized Point-wise Conv"""
def __init__(self, in_channels, out_channels, bias=True, **kwargs):
super().__init__()
self.qlinear = nn.Linear(in_channels, out_channels, bias=bias)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, input_0):
primals_2 = self.qlinear.weight
primals_3 = self.qlinear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
TeaPoly/wenet | QuantPointwiseConv | false | 2,886 | ["Apache-2.0"] | 0 | 5681887e338e4c8b2c75ffc283140e11a9d56a6d | https://github.com/TeaPoly/wenet/tree/5681887e338e4c8b2c75ffc283140e11a9d56a6d
|
Discriminator
|
import torch
import torch.nn as nn
class Down2d(nn.Module):
"""docstring for Down2d."""
def __init__(self, in_channel, out_channel, kernel, stride, padding):
super(Down2d, self).__init__()
self.c1 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n1 = nn.InstanceNorm2d(out_channel)
self.c2 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n2 = nn.InstanceNorm2d(out_channel)
def forward(self, x):
x1 = self.c1(x)
x1 = self.n1(x1)
x2 = self.c2(x)
x2 = self.n2(x2)
x3 = x1 * torch.sigmoid(x2)
return x3
class Discriminator(nn.Module):
"""docstring for Discriminator."""
def __init__(self):
super(Discriminator, self).__init__()
self.d1 = Down2d(5, 32, (3, 9), (1, 1), (1, 4))
self.d2 = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
self.d3 = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
self.d4 = Down2d(36, 32, (3, 6), (1, 2), (1, 2))
self.conv = nn.Conv2d(36, 1, (36, 5), (36, 1), (0, 2))
self.pool = nn.AvgPool2d((1, 64))
def forward(self, x, c):
c = c.view(c.size(0), c.size(1), 1, 1)
c1 = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c1], dim=1)
x = self.d1(x)
c2 = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c2], dim=1)
x = self.d2(x)
c3 = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c3], dim=1)
x = self.d3(x)
c4 = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c4], dim=1)
x = self.d4(x)
c5 = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c5], dim=1)
x = self.conv(x)
x = self.pool(x)
x = torch.squeeze(x)
x = torch.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 1, 512, 512]), torch.rand([4, 4, 1, 1])]
def get_init_inputs():
return [[], {}]
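# Minimal usage sketch (illustrative addition; the 512x512 inputs make this
# compute-heavy): the code c is broadcast across the spatial dims and
# re-concatenated before every stage, and the final scores pass through tanh,
# so they lie in (-1, 1).
if __name__ == "__main__":
    disc = Discriminator()
    x, c = get_inputs()
    scores = disc(x, c)
    print(scores.shape, float(scores.min()), float(scores.max()))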
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 262144 % 5
x0 = xindex % 262144
x2 = xindex // 1310720
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 262144 * x2), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + (-1 + x1)), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = xindex
x1 = xindex // 4 % 32
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
tmp0 = tl.load(in_out_ptr0 + (r3 + 65536 * x4), xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r3 + 65536 * x4), tmp2, xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp5, xmask)
tl.store(out_ptr2 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 128
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 262144.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 262144
x2 = xindex // 8388608
x5 = xindex % 8388608
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, None)
tmp6 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x4, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = tmp5 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp4 * tmp10
tl.store(out_ptr0 + (x5 + 9437184 * x2), tmp11, None)
@triton.jit
def triton_poi_fused_repeat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 262144
x2 = xindex // 1048576
x4 = xindex % 1048576
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + 9437184 * x2), tmp0, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_5(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = xindex
x1 = xindex // 4 % 32
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_out_ptr0 + (r3 + 32768 * x4), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r3 + 32768 * x4), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp5, xmask)
tl.store(out_ptr2 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_6(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 128
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 131072.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 131072
x2 = xindex // 4194304
x5 = xindex % 4194304
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, None)
tmp6 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x4, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = tmp5 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp4 * tmp10
tl.store(out_ptr0 + (x5 + 4718592 * x2), tmp11, None)
@triton.jit
def triton_poi_fused_repeat_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 131072
x2 = xindex // 524288
x4 = xindex % 524288
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + 4718592 * x2), tmp0, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_9(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = xindex
x1 = xindex // 4 % 32
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_out_ptr0 + (r3 + 16384 * x4), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r3 + 16384 * x4), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp5, xmask)
tl.store(out_ptr2 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_10(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 128
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 65536.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 65536
x2 = xindex // 2097152
x5 = xindex % 2097152
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, None)
tmp6 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x4, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = tmp5 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp4 * tmp10
tl.store(out_ptr0 + (x5 + 2359296 * x2), tmp11, None)
@triton.jit
def triton_poi_fused_repeat_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 65536
x2 = xindex // 262144
x4 = xindex % 262144
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + 2359296 * x2), tmp0, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_13(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = xindex
x1 = xindex // 4 % 32
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_out_ptr0 + (r3 + 8192 * x4), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r3 + 8192 * x4), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp5, xmask)
tl.store(out_ptr2 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_14(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 128
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 32768.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 32768
x2 = xindex // 1048576
x5 = xindex % 1048576
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, None)
tmp6 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x4, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = tmp5 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp4 * tmp10
tl.store(out_ptr0 + (x5 + 1179648 * x2), tmp11, None)
@triton.jit
def triton_poi_fused_repeat_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 32768
x2 = xindex // 131072
x4 = xindex % 131072
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + 1179648 * x2), tmp0, None)
@triton.jit
def triton_poi_fused_convolution_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 3584
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_tanh_18(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 56
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 1, 512, 512), (262144, 262144, 512, 1))
assert_size_stride(primals_3, (32, 5, 3, 9), (135, 27, 9, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32, 5, 3, 9), (135, 27, 9, 1))
assert_size_stride(primals_6, (32,), (1,))
assert_size_stride(primals_7, (32, 36, 3, 8), (864, 24, 8, 1))
assert_size_stride(primals_8, (32,), (1,))
assert_size_stride(primals_9, (32, 36, 3, 8), (864, 24, 8, 1))
assert_size_stride(primals_10, (32,), (1,))
assert_size_stride(primals_11, (32, 36, 3, 8), (864, 24, 8, 1))
assert_size_stride(primals_12, (32,), (1,))
assert_size_stride(primals_13, (32, 36, 3, 8), (864, 24, 8, 1))
assert_size_stride(primals_14, (32,), (1,))
assert_size_stride(primals_15, (32, 36, 3, 6), (648, 18, 6, 1))
assert_size_stride(primals_16, (32,), (1,))
assert_size_stride(primals_17, (32, 36, 3, 6), (648, 18, 6, 1))
assert_size_stride(primals_18, (32,), (1,))
assert_size_stride(primals_19, (1, 36, 36, 5), (6480, 180, 5, 1))
assert_size_stride(primals_20, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 512, 512), (1310720, 262144, 512,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(5242880)](primals_2, primals_1, buf0,
5242880, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 512, 512), (8388608, 262144, 512, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((1, 128, 1, 1, 4), (512, 4, 512, 512, 1),
torch.float32)
buf4 = empty_strided_cuda((1, 128, 1, 1, 4), (512, 4, 512, 512, 1),
torch.float32)
buf5 = empty_strided_cuda((1, 128, 1, 1, 4), (512, 4, 512, 512, 1),
torch.float32)
triton_red_fused__native_batch_norm_legit_convolution_1[grid(512)](buf2
, primals_4, buf3, buf4, buf5, 512, 65536, XBLOCK=1, RBLOCK=
2048, num_warps=16, num_stages=1)
del primals_4
buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32
)
buf7 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch
.float32)
buf9 = reinterpret_tensor(buf7, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf7
triton_per_fused__native_batch_norm_legit_2[grid(128)](buf9, buf3,
buf4, buf5, buf6, 128, 4, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf0, primals_5, stride=(1, 1),
padding=(1, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 512, 512), (8388608, 262144, 512, 1))
buf11 = buf10
del buf10
buf12 = buf5
del buf5
buf13 = buf4
del buf4
buf14 = buf3
del buf3
triton_red_fused__native_batch_norm_legit_convolution_1[grid(512)](
buf11, primals_6, buf12, buf13, buf14, 512, 65536, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_6
buf15 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf16 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf18 = reinterpret_tensor(buf16, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf16
triton_per_fused__native_batch_norm_legit_2[grid(128)](buf18, buf12,
buf13, buf14, buf15, 128, 4, XBLOCK=128, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((4, 36, 512, 512), (9437184, 262144, 512,
1), torch.float32)
buf19 = reinterpret_tensor(buf21, (4, 32, 512, 512), (9437184,
262144, 512, 1), 0)
triton_poi_fused_mul_sigmoid_3[grid(33554432)](buf2, buf6, buf9,
buf11, buf15, buf18, buf19, 33554432, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = reinterpret_tensor(buf21, (4, 4, 512, 512), (9437184,
262144, 512, 1), 8388608)
triton_poi_fused_repeat_4[grid(4194304)](primals_1, buf20, 4194304,
XBLOCK=512, num_warps=8, num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_7, stride=(1, 2),
padding=(1, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 32, 512, 256), (4194304, 131072, 256, 1))
buf23 = buf22
del buf22
buf24 = buf14
del buf14
buf25 = buf13
del buf13
buf26 = buf12
del buf12
triton_red_fused__native_batch_norm_legit_convolution_5[grid(512)](
buf23, primals_8, buf24, buf25, buf26, 512, 32768, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_8
buf27 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf28 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf30 = reinterpret_tensor(buf28, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf28
triton_per_fused__native_batch_norm_legit_6[grid(128)](buf30, buf24,
buf25, buf26, buf27, 128, 4, XBLOCK=128, num_warps=4, num_stages=1)
buf31 = extern_kernels.convolution(buf21, primals_9, stride=(1, 2),
padding=(1, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 32, 512, 256), (4194304, 131072, 256, 1))
buf32 = buf31
del buf31
buf33 = buf26
del buf26
buf34 = buf25
del buf25
buf35 = buf24
del buf24
triton_red_fused__native_batch_norm_legit_convolution_5[grid(512)](
buf32, primals_10, buf33, buf34, buf35, 512, 32768, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_10
buf36 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf37 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf39 = reinterpret_tensor(buf37, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf37
triton_per_fused__native_batch_norm_legit_6[grid(128)](buf39, buf33,
buf34, buf35, buf36, 128, 4, XBLOCK=128, num_warps=4, num_stages=1)
buf42 = empty_strided_cuda((4, 36, 512, 256), (4718592, 131072, 256,
1), torch.float32)
buf40 = reinterpret_tensor(buf42, (4, 32, 512, 256), (4718592,
131072, 256, 1), 0)
triton_poi_fused_mul_sigmoid_7[grid(16777216)](buf23, buf27, buf30,
buf32, buf36, buf39, buf40, 16777216, XBLOCK=1024, num_warps=4,
num_stages=1)
buf41 = reinterpret_tensor(buf42, (4, 4, 512, 256), (4718592,
131072, 256, 1), 4194304)
triton_poi_fused_repeat_8[grid(2097152)](primals_1, buf41, 2097152,
XBLOCK=1024, num_warps=4, num_stages=1)
buf43 = extern_kernels.convolution(buf42, primals_11, stride=(1, 2),
padding=(1, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 32, 512, 128), (2097152, 65536, 128, 1))
buf44 = buf43
del buf43
buf45 = buf35
del buf35
buf46 = buf34
del buf34
buf47 = buf33
del buf33
triton_red_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf44, primals_12, buf45, buf46, buf47, 512, 16384, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_12
buf48 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf49 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf51 = reinterpret_tensor(buf49, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf49
triton_per_fused__native_batch_norm_legit_10[grid(128)](buf51,
buf45, buf46, buf47, buf48, 128, 4, XBLOCK=32, num_warps=2,
num_stages=1)
buf52 = extern_kernels.convolution(buf42, primals_13, stride=(1, 2),
padding=(1, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 32, 512, 128), (2097152, 65536, 128, 1))
buf53 = buf52
del buf52
buf54 = buf47
del buf47
buf55 = buf46
del buf46
buf56 = buf45
del buf45
triton_red_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf53, primals_14, buf54, buf55, buf56, 512, 16384, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_14
buf57 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf58 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf60 = reinterpret_tensor(buf58, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf58
triton_per_fused__native_batch_norm_legit_10[grid(128)](buf60,
buf54, buf55, buf56, buf57, 128, 4, XBLOCK=32, num_warps=2,
num_stages=1)
buf63 = empty_strided_cuda((4, 36, 512, 128), (2359296, 65536, 128,
1), torch.float32)
buf61 = reinterpret_tensor(buf63, (4, 32, 512, 128), (2359296,
65536, 128, 1), 0)
triton_poi_fused_mul_sigmoid_11[grid(8388608)](buf44, buf48, buf51,
buf53, buf57, buf60, buf61, 8388608, XBLOCK=1024, num_warps=4,
num_stages=1)
buf62 = reinterpret_tensor(buf63, (4, 4, 512, 128), (2359296, 65536,
128, 1), 2097152)
triton_poi_fused_repeat_12[grid(1048576)](primals_1, buf62, 1048576,
XBLOCK=1024, num_warps=4, num_stages=1)
buf64 = extern_kernels.convolution(buf63, primals_15, stride=(1, 2),
padding=(1, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 32, 512, 64), (1048576, 32768, 64, 1))
buf65 = buf64
del buf64
buf66 = buf56
del buf56
buf67 = buf55
del buf55
buf68 = buf54
del buf54
triton_red_fused__native_batch_norm_legit_convolution_13[grid(512)](
buf65, primals_16, buf66, buf67, buf68, 512, 8192, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_16
buf69 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf70 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf72 = reinterpret_tensor(buf70, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf70
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf72,
buf66, buf67, buf68, buf69, 128, 4, XBLOCK=8, num_warps=2,
num_stages=1)
buf73 = extern_kernels.convolution(buf63, primals_17, stride=(1, 2),
padding=(1, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf73, (4, 32, 512, 64), (1048576, 32768, 64, 1))
buf74 = buf73
del buf73
buf75 = buf68
del buf68
buf76 = buf67
del buf67
buf77 = buf66
del buf66
triton_red_fused__native_batch_norm_legit_convolution_13[grid(512)](
buf74, primals_18, buf75, buf76, buf77, 512, 8192, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_18
buf78 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf79 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf81 = reinterpret_tensor(buf79, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf79
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf81,
buf75, buf76, buf77, buf78, 128, 4, XBLOCK=8, num_warps=2,
num_stages=1)
del buf75
del buf76
del buf77
buf84 = empty_strided_cuda((4, 36, 512, 64), (1179648, 32768, 64, 1
), torch.float32)
buf82 = reinterpret_tensor(buf84, (4, 32, 512, 64), (1179648, 32768,
64, 1), 0)
triton_poi_fused_mul_sigmoid_15[grid(4194304)](buf65, buf69, buf72,
buf74, buf78, buf81, buf82, 4194304, XBLOCK=1024, num_warps=4,
num_stages=1)
buf83 = reinterpret_tensor(buf84, (4, 4, 512, 64), (1179648, 32768,
64, 1), 1048576)
triton_poi_fused_repeat_16[grid(524288)](primals_1, buf83, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf85 = extern_kernels.convolution(buf84, primals_19, stride=(36, 1
), padding=(0, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf85, (4, 1, 14, 64), (896, 896, 64, 1))
buf86 = buf85
del buf85
triton_poi_fused_convolution_17[grid(3584)](buf86, primals_20, 3584,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_20
buf87 = torch.ops.aten.avg_pool2d.default(buf86, [1, 64], [1, 64],
[0, 0], False, True, None)
buf88 = buf87
del buf87
buf89 = reinterpret_tensor(buf88, (4, 14), (14, 1), 0)
del buf88
triton_poi_fused_tanh_18[grid(56)](buf89, 56, XBLOCK=64, num_warps=
1, num_stages=1)
return (buf89, primals_3, primals_5, primals_7, primals_9, primals_11,
primals_13, primals_15, primals_17, primals_19, buf0, buf2, buf6,
buf9, buf11, buf15, buf18, buf21, buf23, buf27, buf30, buf32, buf36,
buf39, buf42, buf44, buf48, buf51, buf53, buf57, buf60, buf63,
buf65, buf69, buf72, buf74, buf78, buf81, buf84, buf86, buf89)
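# --- Orientation note (added commentary, not from the original record) ---
# call() inlines the four Down2d stages of the discriminator. Per stage:
# two extern convolutions run, then a triton_red_fused_* kernel folds the
# bias add into a chunked Welford reduction (partial mean/m2/weight per
# channel), a triton_per_fused_* kernel combines the partials into the
# mean and rsqrt(var + 1e-05) of the instance norm, and a
# triton_poi_fused_mul_sigmoid_* kernel applies the gating
# x1 * sigmoid(x2) while writing straight into a concat buffer whose
# conditioning channels are refilled by triton_poi_fused_repeat_*.
# The head is a strided conv, a bias add (kernel 17), a (1, 64) average
# pool, and an elementwise tanh (kernel 18).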
class Down2d(nn.Module):
"""docstring for Down2d."""
def __init__(self, in_channel, out_channel, kernel, stride, padding):
super(Down2d, self).__init__()
self.c1 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n1 = nn.InstanceNorm2d(out_channel)
self.c2 = nn.Conv2d(in_channel, out_channel, kernel_size=kernel,
stride=stride, padding=padding)
self.n2 = nn.InstanceNorm2d(out_channel)
def forward(self, x):
x1 = self.c1(x)
x1 = self.n1(x1)
x2 = self.c2(x)
x2 = self.n2(x2)
x3 = x1 * torch.sigmoid(x2)
return x3
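# --- Illustrative usage sketch (added; the shapes are assumptions, not
# from the original record) ---
# Down2d is a GLU-style gated block: out = n1(c1(x)) * sigmoid(n2(c2(x))).
# With the d2 hyperparameters below, stride (1, 2) halves the last axis.
if __name__ == '__main__':
    _blk = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
    _y = _blk(torch.randn(2, 36, 64, 128))
    assert _y.shape == (2, 32, 64, 64)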
class DiscriminatorNew(nn.Module):
"""docstring for Discriminator."""
def __init__(self):
super(DiscriminatorNew, self).__init__()
self.d1 = Down2d(5, 32, (3, 9), (1, 1), (1, 4))
self.d2 = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
self.d3 = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
self.d4 = Down2d(36, 32, (3, 6), (1, 2), (1, 2))
self.conv = nn.Conv2d(36, 1, (36, 5), (36, 1), (0, 2))
self.pool = nn.AvgPool2d((1, 64))
def forward(self, input_0, input_1):
primals_3 = self.d1.c1.weight
primals_4 = self.d1.c1.bias
primals_5 = self.d1.c2.weight
primals_6 = self.d1.c2.bias
primals_7 = self.d2.c1.weight
primals_8 = self.d2.c1.bias
primals_9 = self.d2.c2.weight
primals_10 = self.d2.c2.bias
primals_11 = self.d3.c1.weight
primals_12 = self.d3.c1.bias
primals_13 = self.d3.c2.weight
primals_14 = self.d3.c2.bias
primals_15 = self.d4.c1.weight
primals_16 = self.d4.c1.bias
primals_17 = self.d4.c2.weight
primals_18 = self.d4.c2.bias
primals_19 = self.conv.weight
primals_20 = self.conv.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
|
Shimamura-Lab-SU/SGV
|
Discriminator
| false
| 2,887
|
[
"MIT"
] | 0
|
8df3c314532528b8597c5dbb28bdfb23155bee82
|
https://github.com/Shimamura-Lab-SU/SGV/tree/8df3c314532528b8597c5dbb28bdfb23155bee82
|
Drift
|
import torch
import torch.nn as nn
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class Drift(nn.Module):
def __init__(self, dim):
super(Drift, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU(inplace=True)
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm3 = norm(dim)
def forward(self, t, x):
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(t, out)
out = self.norm3(out)
return out
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
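# --- Illustrative usage sketch (added; not part of the original record) ---
# ConcatConv2d conditions on a scalar time t by concatenating a constant
# channel filled with t, which is why the wrapped layer takes dim_in + 1
# input channels; Drift then alternates GroupNorm/ReLU with two such
# time-conditioned convolutions.
if __name__ == '__main__':
    _cc = ConcatConv2d(4, 4, ksize=3, stride=1, padding=1)
    assert _cc(0.5, torch.randn(2, 4, 8, 8)).shape == (2, 4, 8, 8)
    _drift = Drift(dim=4)
    _out = _drift(torch.rand(4, 1, 4, 4), torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 4, 4)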
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x2 + 80 * x3), tmp29, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
tl.store(out_ptr1 + (x0 + 80 * x1), tmp0, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_relu_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr1 + (r2 + 16 * x0 + 80 * x1), tmp31, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp29, xmask)
tl.store(out_ptr3 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_5, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf5 = reinterpret_tensor(buf6, (4, 4, 4, 4), (80, 16, 4, 1), 16)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf3, primals_3,
primals_1, primals_2, buf0, buf5, 16, 16, XBLOCK=8, num_warps=2,
num_stages=1)
buf4 = reinterpret_tensor(buf6, (4, 1, 4, 4), (80, 16, 4, 1), 0)
buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(64)](primals_4, buf4, buf13, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf12 = reinterpret_tensor(buf10, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf10
buf14 = reinterpret_tensor(buf15, (4, 4, 4, 4), (80, 16, 4, 1), 16)
triton_per_fused_convolution_native_group_norm_relu_2[grid(16)](buf8,
buf12, primals_6, primals_7, primals_8, buf9, buf14, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_6
buf16 = extern_kernels.convolution(buf15, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1))
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_convolution_native_group_norm_3[grid(16)](buf17,
primals_10, primals_11, primals_12, buf18, buf21, buf22, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_10
del primals_12
return (buf21, primals_1, primals_2, primals_3, primals_5, primals_7,
primals_8, primals_9, primals_11, buf0, buf3, buf6, buf8, buf9,
buf12, buf15, buf17, reinterpret_tensor(buf18, (4, 4), (4, 1), 0),
reinterpret_tensor(buf22, (4, 4), (4, 1), 0))
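# --- Orientation note (added commentary, not from the original record) ---
# Each triton_per_fused_*group_norm* kernel above computes the full
# GroupNorm statistics (mean and rsqrt(var + 1e-05)) for one
# (sample, group) pair in a single 16-element reduction, then writes the
# normalized (and, where fused, ReLU'd) values directly into the
# 5-channel concat buffer (stride 80 = 5 * 16 per sample), so the
# torch.cat in ConcatConv2d.forward is never materialized as a separate
# op; kernel 1 just copies the time channel into both concat buffers.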
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class DriftNew(nn.Module):
def __init__(self, dim):
super(DriftNew, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU(inplace=True)
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm3 = norm(dim)
def forward(self, input_0, input_1):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_5 = self.conv1._layer.weight
primals_6 = self.conv1._layer.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.conv2._layer.weight
primals_10 = self.conv2._layer.bias
primals_11 = self.norm3.weight
primals_12 = self.norm3.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
Teemo341/BDNN
|
Drift
| false
| 2,888
|
[
"Apache-2.0"
] | 0
|
d53d4634a7a43d038faa049d7dfd10b3578ae267
|
https://github.com/Teemo341/BDNN/tree/d53d4634a7a43d038faa049d7dfd10b3578ae267
|
Encoder
|
import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, latent_dim):
super(Encoder, self).__init__()
self.FC_input = nn.Linear(input_dim, hidden_dim)
self.FC_mean = nn.Linear(hidden_dim, latent_dim)
self.FC_var = nn.Linear(hidden_dim, latent_dim)
self.training = True
def forward(self, x):
h_ = torch.relu(self.FC_input(x))
mean = self.FC_mean(h_)
log_var = self.FC_var(h_)
std = torch.exp(0.5 * log_var)
z = self.reparameterization(mean, std)
return z, mean, log_var
def reparameterization(self, mean, std):
epsilon = torch.rand_like(std)
z = mean + std * epsilon
return z
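# --- Illustrative sketch (added; not part of the original record) ---
# The reparameterization trick samples z = mean + std * eps with eps drawn
# independently of the parameters, so gradients flow through mean and std.
# Note this record uses torch.rand_like (eps ~ Uniform[0, 1)); the
# classical VAE formulation uses torch.randn_like (eps ~ N(0, 1)).
if __name__ == '__main__':
    _enc = Encoder(input_dim=4, hidden_dim=4, latent_dim=4)
    _z, _mean, _log_var = _enc(torch.rand(4, 4, 4, 4))
    assert _z.shape == _mean.shape == _log_var.shape == (4, 4, 4, 4)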
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4, 'latent_dim': 4}]
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](buf2, buf3, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, primals_6, primals_4, buf7
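# --- Orientation note (added commentary, not from the original record) ---
# triton_poi_fused_add_exp_mul_1 fuses std = exp(0.5 * log_var) with the
# reparameterization z = mean + std * eps in one elementwise pass; eps is
# the uniform sample produced by torch.ops.aten.rand above, matching the
# module's torch.rand_like. Kernel 0 fuses the first Linear's bias add
# with ReLU and also records the <= 0 mask used by the backward pass.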
class EncoderNew(nn.Module):
def __init__(self, input_dim, hidden_dim, latent_dim):
super(EncoderNew, self).__init__()
self.FC_input = nn.Linear(input_dim, hidden_dim)
self.FC_mean = nn.Linear(hidden_dim, latent_dim)
self.FC_var = nn.Linear(hidden_dim, latent_dim)
self.training = True
def reparameterization(self, mean, std):
epsilon = torch.rand_like(std)
z = mean + std * epsilon
return z
def forward(self, input_0):
primals_1 = self.FC_input.weight
primals_2 = self.FC_input.bias
primals_4 = self.FC_mean.weight
primals_5 = self.FC_mean.bias
primals_6 = self.FC_var.weight
primals_7 = self.FC_var.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
|
TeoAndB/dtu_mlops
|
Encoder
| false
| 2,889
|
[
"Apache-2.0"
] | 0
|
671d8922298554659fd9697f0ebca7e8bfa0e8c2
|
https://github.com/TeoAndB/dtu_mlops/tree/671d8922298554659fd9697f0ebca7e8bfa0e8c2
|
GCN_encoder
|
import torch
import torch.nn as nn
import torch.nn.init as init
class GraphConv(nn.Module):
def __init__(self, input_dim, output_dim):
super(GraphConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.weight = nn.Parameter(torch.FloatTensor(input_dim, output_dim))
def forward(self, x, adj):
y = torch.matmul(adj, x)
y = torch.matmul(y, self.weight)
return y
class GCN_encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(GCN_encoder, self).__init__()
self.conv1 = GraphConv(input_dim=input_dim, output_dim=hidden_dim)
self.conv2 = GraphConv(input_dim=hidden_dim, output_dim=output_dim)
self.relu = nn.ReLU()
for m in self.modules():
if isinstance(m, GraphConv):
m.weight.data = init.xavier_uniform(m.weight.data, gain=nn.
init.calculate_gain('relu'))
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x, adj):
x = self.conv1(x, adj)
x = self.relu(x)
x = self.conv2(x, adj)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
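# --- Illustrative usage sketch (added; not part of the original record) ---
# GraphConv applies the unnormalized propagation y = adj @ x @ W (no bias,
# no degree normalization), and GCN_encoder stacks two of them with a ReLU
# in between. (init.xavier_uniform is deprecated in favor of
# xavier_uniform_, but still functional here.)
if __name__ == '__main__':
    _gcn = GCN_encoder(input_dim=4, hidden_dim=8, output_dim=2)
    _adj = torch.eye(4).expand(3, 4, 4)  # batch of identity adjacencies
    assert _gcn(torch.randn(3, 4, 4), _adj).shape == (3, 4, 2)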
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
buf4 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
del buf2
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
primals_4, out=buf4)
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (16, 4,
4), (16, 1, 4), 0), buf5, reinterpret_tensor(buf0, (4, 64), (1, 4), 0)
class GraphConv(nn.Module):
def __init__(self, input_dim, output_dim):
super(GraphConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.weight = nn.Parameter(torch.FloatTensor(input_dim, output_dim))
def forward(self, x, adj):
y = torch.matmul(adj, x)
y = torch.matmul(y, self.weight)
return y
class GCN_encoderNew(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(GCN_encoderNew, self).__init__()
self.conv1 = GraphConv(input_dim=input_dim, output_dim=hidden_dim)
self.conv2 = GraphConv(input_dim=hidden_dim, output_dim=output_dim)
self.relu = nn.ReLU()
for m in self.modules():
if isinstance(m, GraphConv):
m.weight.data = init.xavier_uniform(m.weight.data, gain=nn.
init.calculate_gain('relu'))
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_0, input_1):
primals_3 = self.conv1.weight
primals_4 = self.conv2.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Qin-Folks/graph-generation
|
GCN_encoder
| false
| 2,890
|
[
"MIT"
] | 0
|
afe1b697272b0e683b4551918de36f57f714e70b
|
https://github.com/Qin-Folks/graph-generation/tree/afe1b697272b0e683b4551918de36f57f714e70b
|
Network
|
import torch
import torch.nn as nn
class Network(nn.Module):
def __init__(self, input_shape, output_shape, n_features, **kwargs):
super(Network, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h1 = nn.Linear(n_input, n_features)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_output)
nn.init.xavier_uniform_(self._h1.weight, gain=nn.init.
calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h2.weight, gain=nn.init.
calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h3.weight, gain=nn.init.
calculate_gain('linear'))
def forward(self, state, **kwargs):
features1 = torch.tanh(self._h1(torch.squeeze(state, -1).float()))
features2 = torch.tanh(self._h2(features1))
a = self._h3(features2)
return a
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': [4, 4], 'output_shape': [4, 4],
'n_features': 4}]
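# --- Illustrative sketch (added; not part of the original record) ---
# The xavier_uniform_ gains match each layer's activation:
# calculate_gain('tanh') = 5/3 for the two tanh hidden layers and
# calculate_gain('linear') = 1 for the output head. torch.squeeze(state, -1)
# is a no-op here because the trailing dimension is not of size 1.
if __name__ == '__main__':
    _net = Network(input_shape=[4, 4], output_shape=[4, 4], n_features=4)
    assert _net(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)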
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class NetworkNew(nn.Module):
def __init__(self, input_shape, output_shape, n_features, **kwargs):
super(NetworkNew, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h1 = nn.Linear(n_input, n_features)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_output)
nn.init.xavier_uniform_(self._h1.weight, gain=nn.init.
calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h2.weight, gain=nn.init.
calculate_gain('tanh'))
nn.init.xavier_uniform_(self._h3.weight, gain=nn.init.
calculate_gain('linear'))
def forward(self, input_0):
primals_2 = self._h1.weight
primals_3 = self._h1.bias
primals_4 = self._h2.weight
primals_5 = self._h2.bias
primals_6 = self._h3.weight
primals_7 = self._h3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
TheCamusean/mushroom-rl
|
Network
| false
| 2,891
|
[
"MIT"
] | 0
|
48585f883e546ea57224b8d446ecb9b8ba90cf73
|
https://github.com/TheCamusean/mushroom-rl/tree/48585f883e546ea57224b8d446ecb9b8ba90cf73
|
TransformerEncoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class TransformerEncoderLayer(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def forward(self, x, key_padding_mask=None, attn_mask=None):
residual = x
x = self.self_attn_layer_norm(x)
x, _att = self.self_attn(query=x, key=x, value=x, key_padding_mask=
key_padding_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
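# --- Illustrative sketch (added; not part of the original record) ---
# This layer uses the pre-norm residual arrangement:
#   x = x + Dropout(SelfAttn(LN(x)))
#   x = x + Dropout(FC2(Dropout(ReLU(FC1(LN(x))))))
# The unbatched (L, E) input below matches get_inputs(); it requires a
# PyTorch version whose nn.MultiheadAttention accepts unbatched inputs.
if __name__ == '__main__':
    _layer = TransformerEncoderLayer(embed_dim=4, num_heads=4, hidden_size=4)
    assert _layer(torch.rand(4, 4)).shape == (4, 4)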
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(16)](buf17, primals_11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (4, 4), (1,
4), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, input_0):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_2 = self.self_attn.out_proj.bias
primals_3 = self.self_attn_layer_norm.weight
primals_7 = self.self_attn_layer_norm.bias
primals_6 = self.fc1.weight
primals_8 = self.fc1.bias
primals_10 = self.fc2.weight
primals_9 = self.fc2.bias
primals_11 = self.layer_norm.weight
primals_13 = self.layer_norm.bias
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
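# Hedged usage sketch (editor addition, not from the source repo): the shape
# asserts in call() fix embed_dim=4 with a (4, 4) input, and the 1.0 scale in
# triton_poi_fused_mul_2 implies head_dim=1, i.e. num_heads=4 (an inference,
# not stated in the source). Assumes a CUDA device.
if __name__ == '__main__':
    layer = TransformerEncoderLayerNew(embed_dim=4, num_heads=4,
                                       hidden_size=4).cuda()
    out = layer(torch.rand(4, 4, device='cuda'))
    print(out.shape)  # (4, 4)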
| repo_name: Shawn-Guo-CN/EGG | module_name: TransformerEncoderLayer | synthetic: false | uuid: 2,892 | licenses: ["MIT"] | stars: 0 | sha: 0a5b258108e2cd1c873d7f67e8c92551bb3d809c | repo_link: https://github.com/Shawn-Guo-CN/EGG/tree/0a5b258108e2cd1c873d7f67e8c92551bb3d809c |

entry_point: CriticNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CriticNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain
('relu'))
def forward(self, state, action):
state_action = torch.cat((state.float(), action.float()), dim=1)
q = F.relu(self._h(state_action))
return torch.squeeze(q)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': [4, 4], 'output_shape': [4, 4]}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_squeeze_threshold_backward_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
triton_poi_fused_relu_squeeze_threshold_backward_1[grid(512)](buf1,
primals_4, buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_4
return buf2, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf3
class CriticNetworkNew(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain
('relu'))
def forward(self, input_0, input_1):
primals_3 = self._h.weight
primals_4 = self._h.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
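# Hedged usage sketch (editor addition, not from the source repo): a minimal
# smoke test for the compiled path, assuming a CUDA device. Shapes mirror
# get_inputs()/get_init_inputs() above.
if __name__ == '__main__':
    critic = CriticNetworkNew([4, 4], [4, 4]).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    action = torch.rand(4, 4, 4, 4, device='cuda')
    q = critic(state, action)
    print(q.shape)  # (4, 8, 4, 4): fused cat(dim=1) + linear + ReLU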
| repo_name: TheCamusean/mushroom-rl | module_name: CriticNetwork | synthetic: false | uuid: 2,893 | licenses: ["MIT"] | stars: 0 | sha: 48585f883e546ea57224b8d446ecb9b8ba90cf73 | repo_link: https://github.com/TheCamusean/mushroom-rl/tree/48585f883e546ea57224b8d446ecb9b8ba90cf73 |

entry_point: ActorNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super(ActorNetwork, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain
('relu'))
def forward(self, state):
return F.relu(self._h(torch.squeeze(state, 1).float()))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': [4, 4], 'output_shape': [4, 4]}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2
class ActorNetworkNew(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super(ActorNetworkNew, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight, gain=nn.init.calculate_gain
('relu'))
def forward(self, input_0):
primals_2 = self._h.weight
primals_3 = self._h.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
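# Hedged usage sketch (editor addition, not from the source repo): exercises the
# fused linear + ReLU kernel on a CUDA device with the shapes from get_inputs().
if __name__ == '__main__':
    actor = ActorNetworkNew([4, 4], [4, 4]).cuda()
    out = actor(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # (4, 4, 4, 4), ReLU applied elementwise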
| repo_name: TheCamusean/mushroom-rl | module_name: ActorNetwork | synthetic: false | uuid: 2,895 | licenses: ["MIT"] | stars: 0 | sha: 48585f883e546ea57224b8d446ecb9b8ba90cf73 | repo_link: https://github.com/TheCamusean/mushroom-rl/tree/48585f883e546ea57224b8d446ecb9b8ba90cf73 |

entry_point: ConcatBlock
|
import torch
import torch.nn as nn
import torch.nn.functional
class ConcatBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConcatBlock, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(self.in_chns, self.in_chns, kernel_size=1,
padding=0)
self.conv2 = nn.Conv2d(self.in_chns, self.out_chns, kernel_size=1,
padding=0)
self.ac1 = nn.LeakyReLU()
self.ac2 = nn.LeakyReLU()
def forward(self, x):
x = self.conv1(x)
x = self.ac1(x)
x = self.conv2(x)
x = self.ac2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf3,
primals_5, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4
class ConcatBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConcatBlockNew, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(self.in_chns, self.in_chns, kernel_size=1,
padding=0)
self.conv2 = nn.Conv2d(self.in_chns, self.out_chns, kernel_size=1,
padding=0)
self.ac1 = nn.LeakyReLU()
self.ac2 = nn.LeakyReLU()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
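# Hedged usage sketch (editor addition, not from the source repo): both 1x1
# convolutions go through extern_kernels.convolution; the Triton kernel fuses
# each bias add with its LeakyReLU. Assumes a CUDA device.
if __name__ == '__main__':
    block = ConcatBlockNew(4, 4).cuda()
    y = block(torch.rand(4, 4, 4, 4, device='cuda'))
    print(y.shape)  # (4, 4, 4, 4)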
| repo_name: TheSeaOfStars123/SSL4MIS | module_name: ConcatBlock | synthetic: false | uuid: 2,896 | licenses: ["MIT"] | stars: 0 | sha: a3fb6e8c996683eb79dc3f20e965064b7f5d2b3d | repo_link: https://github.com/TheSeaOfStars123/SSL4MIS/tree/a3fb6e8c996683eb79dc3f20e965064b7f5d2b3d |

entry_point: MLP
|
import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLP(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduces a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
x (~torch.Tensor):
The size of each input feature is `n_in`.
Returns:
A tensor with the size of each output feature `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLPNew(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduces a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
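# Hedged usage sketch (editor addition, not from the source repo): with the
# default dropout=0, SharedDropout is effectively a no-op, so the compiled path
# is just the fused linear + LeakyReLU(0.1). Assumes a CUDA device.
if __name__ == '__main__':
    mlp = MLPNew(4, 4).cuda()
    print(mlp(torch.rand(4, 4, 4, 4, device='cuda')).shape)  # (4, 4, 4, 4)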
| repo_name: TheSecret3003/crf_parser | module_name: MLP | synthetic: false | uuid: 2,897 | licenses: ["MIT"] | stars: 0 | sha: 34682ca8729d376b5582a3117e650b524fbcb355 | repo_link: https://github.com/TheSecret3003/crf_parser/tree/34682ca8729d376b5582a3117e650b524fbcb355 |

entry_point: OutPutBlock
|
import torch
import torch.nn as nn
import torch.nn.functional
class OutPutBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutPutBlock, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(self.in_chns, self.in_chns // 2, kernel_size
=1, padding=0)
self.conv2 = nn.Conv2d(self.in_chns // 2, self.out_chns,
kernel_size=1, padding=0)
self.drop1 = nn.Dropout2d(0.3)
self.drop2 = nn.Dropout2d(0.3)
self.ac1 = nn.LeakyReLU()
def forward(self, x):
x = self.drop1(x)
x = self.conv1(x)
x = self.ac1(x)
x = self.drop2(x)
x = self.conv2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 2
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1))
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(128)](buf0,
primals_3, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_2, primals_4, buf1, buf2
class OutPutBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutPutBlockNew, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(self.in_chns, self.in_chns // 2, kernel_size
=1, padding=0)
self.conv2 = nn.Conv2d(self.in_chns // 2, self.out_chns,
kernel_size=1, padding=0)
self.drop1 = nn.Dropout2d(0.3)
self.drop2 = nn.Dropout2d(0.3)
self.ac1 = nn.LeakyReLU()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
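# Hedged usage sketch (editor addition, not from the source repo): the compiled
# graph contains no dropout ops (inference-style trace), so it runs
# conv1 -> LeakyReLU -> conv2 only. Assumes a CUDA device.
if __name__ == '__main__':
    head = OutPutBlockNew(4, 4).cuda()
    print(head(torch.rand(4, 4, 4, 4, device='cuda')).shape)  # (4, 4, 4, 4)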
| repo_name: TheSeaOfStars123/SSL4MIS | module_name: OutPutBlock | synthetic: false | uuid: 2,898 | licenses: ["MIT"] | stars: 0 | sha: a3fb6e8c996683eb79dc3f20e965064b7f5d2b3d | repo_link: https://github.com/TheSeaOfStars123/SSL4MIS/tree/a3fb6e8c996683eb79dc3f20e965064b7f5d2b3d |

entry_point: random_resize
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
def resize_4d_tensor_by_factor(x, height_factor, width_factor):
res = F.interpolate(x, scale_factor=(height_factor, width_factor), mode
='bilinear')
return res
class random_resize(nn.Module):
def __init__(self, max_size_factor, min_size_factor):
super().__init__()
self.max_size_factor = max_size_factor
self.min_size_factor = min_size_factor
def forward(self, x):
height_factor = random.uniform(a=self.min_size_factor, b=self.
max_size_factor)
width_factor = random.uniform(a=self.min_size_factor, b=self.
max_size_factor)
resized = resize_4d_tensor_by_factor(x=x, height_factor=
height_factor, width_factor=width_factor)
return resized
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'max_size_factor': 4, 'min_size_factor': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), None,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), None,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = 1.0
tmp30 = triton_helpers.minimum(tmp28, tmp29)
tmp31 = tmp25 * tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), None,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), None,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp30
tmp36 = tmp32 + tmp35
tmp37 = tmp24 + tmp31
tmp38 = tmp37 - tmp36
tmp39 = tmp9.to(tl.float32)
tmp40 = tmp8 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = triton_helpers.minimum(tmp41, tmp29)
tmp43 = tmp38 * tmp42
tmp44 = tmp36 + tmp43
tl.store(in_out_ptr0 + x3, tmp44, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
buf2 = buf0
del buf0
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(4096)](buf3, arg0_1, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf3,
def resize_4d_tensor_by_factor(x, height_factor, width_factor):
res = F.interpolate(x, scale_factor=(height_factor, width_factor), mode
='bilinear')
return res
class random_resizeNew(nn.Module):
def __init__(self, max_size_factor, min_size_factor):
super().__init__()
self.max_size_factor = max_size_factor
self.min_size_factor = min_size_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
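# Hedged usage sketch (editor addition, not from the source repo): with
# min_size_factor == max_size_factor == 4, random.uniform collapses to 4.0, so
# the traced kernel hard-codes the 4x bilinear upsample (the 0.25 constant).
# Assumes a CUDA device.
if __name__ == '__main__':
    rr = random_resizeNew(4, 4)
    out = rr(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # (4, 4, 16, 16)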
| repo_name: Tiamat-Tech/torch-dreams | module_name: random_resize | synthetic: false | uuid: 2,899 | licenses: ["MIT"] | stars: 0 | sha: e1c1795f0a0007f54293c474de5d2b80ee829ab8 | repo_link: https://github.com/Tiamat-Tech/torch-dreams/tree/e1c1795f0a0007f54293c474de5d2b80ee829ab8 |

entry_point: ScalarMix
|
import torch
import torch.nn as nn
class ScalarMix(nn.Module):
"""
Computes a parameterized scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjusts its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, tensors):
"""
Args:
tensors (list[~torch.Tensor]):
:math:`N` tensors to be mixed.
Returns:
The mixture of :math:`N` tensors.
"""
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(64)](primals_3, primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4,
4), (16, 4, 1), 0)
class ScalarMixNew(nn.Module):
"""
Computes a parameterized scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjusts its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, input_0):
primals_1 = self.weights
primals_3 = self.gamma
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
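# Hedged usage sketch (editor addition, not from the source repo): with
# n_layers=1 the softmax weight is exactly 1, and zip() in the eager forward
# pairs that single weight with tensors[0], so the traced output is
# (4, 4, 4) for a (4, 4, 4, 4) input. Assumes a CUDA device.
if __name__ == '__main__':
    mix = ScalarMixNew(1).cuda()
    print(mix(torch.rand(4, 4, 4, 4, device='cuda')).shape)  # (4, 4, 4)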
| repo_name: TheSecret3003/crf_parser | module_name: ScalarMix | synthetic: false | uuid: 2,900 | licenses: ["MIT"] | stars: 0 | sha: 34682ca8729d376b5582a3117e650b524fbcb355 | repo_link: https://github.com/TheSecret3003/crf_parser/tree/34682ca8729d376b5582a3117e650b524fbcb355 |

entry_point: DotProdAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DotProdAttention(nn.Module):
"""Basic Dot-Production Attention"""
def __init__(self):
super().__init__()
def forward(self, output, context):
"""Basic Dot-Production Method
1. compute e = q * k
2. compute tanh(softmax(e) * k)
Args:
output (batch, 1, hidden): output from decoder rnn
context (batch, seq, hidden): output from encoder rnn
Returns:
output (batch, 1, hidden): modified output
attn (batch, 1, seq): attention state in this step
"""
attn = torch.bmm(output, context.transpose(1, 2))
attn = F.softmax(attn, dim=2)
output = F.tanh(torch.bmm(attn, context))
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg0_1, out=buf3)
del arg0_1
buf4 = buf3
del buf3
triton_poi_fused_tanh_2[grid(64)](buf4, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf4, buf2
class DotProdAttentionNew(nn.Module):
"""Basic Dot-Production Attention"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
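# Hedged usage sketch (editor addition, not from the source repo): batch=4,
# seq=4, hidden=4; the two softmax kernels are the usual max-shift and
# normalize passes, so each attention row sums to 1. Assumes a CUDA device.
if __name__ == '__main__':
    attn_mod = DotProdAttentionNew()
    out, attn = attn_mod(torch.rand(4, 4, 4, device='cuda'),
                         torch.rand(4, 4, 4, device='cuda'))
    print(out.shape, attn.shape)  # (4, 4, 4) (4, 4, 4)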
| repo_name: Tzu-An/ml_seq2seq_attn | module_name: DotProdAttention | synthetic: false | uuid: 2,901 | licenses: ["Apache-2.0"] | stars: 0 | sha: 1f29b1156c5e66e2bb5255c6d214c70162c91528 | repo_link: https://github.com/Tzu-An/ml_seq2seq_attn/tree/1f29b1156c5e66e2bb5255c6d214c70162c91528 |

entry_point: AlphaGoCnn
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class AlphaGoCnn(nn.Module):
def __init__(self):
super(AlphaGoCnn, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1)
self.fc1 = nn.Linear(32 * 9 * 9, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 1)
def forward(self, x):
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
x = x.view(-1, 32 * 9 * 9)
x = F.leaky_relu(self.fc1(x), negative_slope=0.1)
x = F.leaky_relu(self.fc2(x), negative_slope=0.1)
x = torch.sigmoid(self.fc3(x)).reshape(-1)
return x
def get_inputs():
return [torch.rand([4, 3, 9, 9])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 10368
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 10368
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 32
x2 = xindex // 2592
x4 = xindex % 2592
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4 + 2688 * x2), tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp4 * tmp6
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (128, 2592), (2592, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 128), (128, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (1, 128), (128, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 9, 9), (2592, 81, 9, 1))
buf1 = empty_strided_cuda((4, 32, 9, 9), (2592, 81, 9, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 9, 9), (2592, 81, 9, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(10368)](buf0,
primals_2, buf1, buf2, 10368, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 9, 9), (2592, 81, 9, 1))
buf4 = empty_strided_cuda((4, 32, 9, 9), (2592, 81, 9, 1), torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(10368)](buf3,
primals_5, buf4, buf5, 10368, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 9, 9), (2592, 81, 9, 1))
buf7 = empty_strided_cuda((4, 32, 9, 9), (2688, 81, 9, 1), torch.bool)
buf8 = buf3
del buf3
triton_poi_fused_convolution_leaky_relu_1[grid(10368)](buf6,
primals_7, buf7, buf8, 10368, XBLOCK=128, num_warps=4, num_stages=1
)
del buf6
del primals_7
buf9 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 2592), (2592, 1), 0),
reinterpret_tensor(primals_8, (2592, 128), (1, 2592), 0), out=buf9)
buf10 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf11 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(512)](buf9, primals_9, buf10,
buf11, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf12 = buf9
del buf9
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (128, 128),
(1, 128), 0), out=buf12)
buf13 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf14 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(512)](buf12, primals_11, buf13,
buf14, 512, XBLOCK=128, num_warps=4, num_stages=1)
del buf12
del primals_11
buf15 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_12, (128, 1), (
1, 128), 0), out=buf15)
buf16 = buf15
del buf15
buf17 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused_sigmoid_sigmoid_backward_3[grid(4)](buf16,
primals_13, buf17, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_13
return (reinterpret_tensor(buf16, (4,), (1,), 0), primals_1, primals_3,
primals_4, primals_6, buf1, buf2, buf4, buf5, buf7,
reinterpret_tensor(buf8, (4, 2592), (2592, 1), 0), buf10, buf11,
buf13, buf14, buf17, primals_12, primals_10, primals_8)
class AlphaGoCnnNew(nn.Module):
def __init__(self):
super(AlphaGoCnnNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1)
self.fc1 = nn.Linear(32 * 9 * 9, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_12 = self.fc3.weight
primals_13 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
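# Hedged usage sketch (editor addition, not from the source repo): a batch of
# four 9x9 boards with 3 input planes, yielding one sigmoid value per board.
# Assumes a CUDA device.
if __name__ == '__main__':
    net = AlphaGoCnnNew().cuda()
    v = net(torch.rand(4, 3, 9, 9, device='cuda'))
    print(v.shape)  # (4,), each value in (0, 1)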
| repo_name: Theomat/go-enseirb-2020 | module_name: AlphaGoCnn | synthetic: false | uuid: 2,902 | licenses: ["Apache-2.0"] | stars: 0 | sha: ae842888dfd61a23d3556c5f63c4474bdbb1685f | repo_link: https://github.com/Theomat/go-enseirb-2020/tree/ae842888dfd61a23d3556c5f63c4474bdbb1685f |

entry_point: TimeEncoding
|
import torch
import torch.nn as nn
class TimeEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(TimeEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, mask, lengths):
time = mask * 1 / (lengths[..., None] - 1)
time = time[:, None] * torch.arange(time.shape[1], device=x.device)[
None, :]
time = time[:, 0].T
x = x + time[..., None]
return self.dropout(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x7 = xindex // 64
x4 = xindex // 256 % 4
x5 = xindex // 1024
x6 = xindex // 4 % 16
x2 = xindex // 16 % 4
x3 = xindex // 64 % 4
x1 = xindex // 4 % 4
x9 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x7), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x6 + 16 * x5 + 64 * x4), None,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x2 + 4 * x5 + 16 * x4 + 64 * x3), None,
eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = tmp3 / tmp5
tmp7 = x1
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 * tmp8
tmp10 = tmp0 + tmp9
tl.store(out_ptr0 + x9, tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (4, 16, 1024, 256, 64,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(4096)](arg2_1, arg0_1, arg1_1, buf0,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class TimeEncodingNew(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(TimeEncodingNew, self).__init__()
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
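# Hedged usage sketch (editor addition, not from the source repo): all three
# inputs take the (4, 4, 4, 4) shape from get_inputs(); the traced broadcast
# produces a rank-6 buffer, which is returned as-is. Assumes a CUDA device.
if __name__ == '__main__':
    te = TimeEncodingNew(4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, 4, 4, device='cuda')
    lengths = torch.full((4, 4, 4, 4), 4.0, device='cuda')
    print(te(x, mask, lengths).shape)  # (4, 4, 4, 4, 4, 4)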
| repo_name: Tim-blo/ACTOR | module_name: TimeEncoding | synthetic: false | uuid: 2,903 | licenses: ["MIT"] | stars: 0 | sha: f10d7534a34fa557ab6b1739217649ae4f654505 | repo_link: https://github.com/Tim-blo/ACTOR/tree/f10d7534a34fa557ab6b1739217649ae4f654505 |

entry_point: MyUpsample2
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class MyUpsample2(nn.Module):
def forward(self, x):
return x[:, :, :, None, :, None].expand(-1, -1, -1, 2, -1, 2).reshape(x
.size(0), x.size(1), x.size(2) * 2, x.size(3) * 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 4
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0),
class MyUpsample2New(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
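# Hedged usage sketch (editor addition, not from the source repo): 2x
# nearest-neighbour upsampling via expand + reshape, materialized by the clone
# kernel above. Assumes a CUDA device.
if __name__ == '__main__':
    up = MyUpsample2New()
    y = up(torch.rand(4, 4, 4, 4, device='cuda'))
    print(y.shape)  # (4, 4, 8, 8)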
| repo_name: TransformersWsz/onestage_grounding | module_name: MyUpsample2 | synthetic: false | uuid: 2,904 | licenses: ["MIT"] | stars: 0 | sha: c939a7d5d7c7f9e1bfa8df2e6269397b8f840b5a | repo_link: https://github.com/TransformersWsz/onestage_grounding/tree/c939a7d5d7c7f9e1bfa8df2e6269397b8f840b5a |