| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
SoftmaxLoss
|
import torch
import torch.nn as nn
class SoftmaxLoss(nn.Module):
def __init__(self, hidden_dim, speaker_num, **kwargs):
"""
Softmax Loss
"""
super(SoftmaxLoss, self).__init__()
self.fc = nn.Linear(hidden_dim, speaker_num)
self.loss = nn.CrossEntropyLoss()
def forward(self, x_BxH, labels_B):
"""
x shape: (B, H)
labels shape: (B)
"""
logits_BxSpn = self.fc(x_BxH)
loss = self.loss(logits_BxSpn, labels_B)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'speaker_num': 4}]
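# Minimal usage sketch (my addition, not part of the dataset record):
# the intended call path is (B, H) embeddings with integer speaker ids.
loss_fn = SoftmaxLoss(hidden_dim=4, speaker_num=4)
x_BxH = torch.rand(8, 4)                 # (B, H) speaker embeddings
labels_B = torch.randint(0, 4, (8,))     # (B,) speaker ids
print(loss_fn(x_BxH, labels_B))          # scalar cross-entropy loss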
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3, buf1,
primals_4, 1, 256, num_warps=2, num_stages=1)
del buf1
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class SoftmaxLossNew(nn.Module):
def __init__(self, hidden_dim, speaker_num, **kwargs):
"""
Softmax Loss
"""
super(SoftmaxLossNew, self).__init__()
self.fc = nn.Linear(hidden_dim, speaker_num)
self.loss = nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
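# Hedged equivalence check (my addition): assumes a CUDA device, since the
# generated kernels are GPU-only, and that SoftmaxLoss/get_inputs above are
# in scope. The 4-D get_inputs tensors exercise the soft-label CE path.
if torch.cuda.is_available():
    ref = SoftmaxLoss(4, 4).cuda()
    opt = SoftmaxLossNew(4, 4).cuda()
    opt.load_state_dict(ref.state_dict())
    a, b = (t.cuda() for t in get_inputs())
    torch.testing.assert_close(ref(a, b), opt(a, b))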
|
albertvillanova/s3prl | SoftmaxLoss | false | 6,165 | ["MIT"] | 1 | b127ade4ed2f80a1027901bbd2f204b4fb1aaf03 | https://github.com/albertvillanova/s3prl/tree/b127ade4ed2f80a1027901bbd2f204b4fb1aaf03 |
Delta
|
import torch
import torch.nn as nn
from torchaudio import transforms
class Delta(nn.Module):
def __init__(self, order=2, **kwargs):
super(Delta, self).__init__()
self.order = order
self.compute_delta = transforms.ComputeDeltas(**kwargs)
def forward(self, x):
feats = [x]
for o in range(self.order):
feat = feats[-1].transpose(0, 1).unsqueeze(0)
delta = self.compute_delta(feat)
feats.append(delta.squeeze(0).transpose(0, 1))
x = torch.cat(feats, dim=-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
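# Sanity-check sketch (my addition): with order=2 the base features are
# concatenated with first- and second-order deltas, tripling the last dim.
delta = Delta(order=2)
x = torch.rand(4, 4, 4, 4)   # same shape as get_inputs() above
print(delta(x).shape)        # torch.Size([4, 4, 4, 12])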
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torchaudio import transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * (x1 // 16) + 64 * (x1 //
4 % 4) + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) +
(0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 +
x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x2 = xindex
tmp0 = -2 + x0
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + x2, tmp1, xmask)
@triton.jit
def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask,
eviction_policy='evict_last')
tmp1 = 0.1
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x4 = xindex // 12
x1 = xindex // 12 % 4
x2 = xindex // 48 % 4
x3 = xindex // 192
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + 64 * x2 + (-4 + x0)),
tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 0.1
tmp12 = tmp10 * tmp11
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp9, tmp12, tmp13)
tmp15 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp18 = tl.load(in_ptr2 + (4 * x1 + 16 * x3 + 64 * x2 + (-8 + x0)),
tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp18 * tmp11
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp14, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + x5, tmp23, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32)
triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=
(0,), dilation=(1,), transposed=False, output_padding=(0,),
groups=64, bias=None)
assert_size_stride(buf2, (1, 64, 4), (256, 4, 1))
buf3 = buf0
del buf0
triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding=
(0,), dilation=(1,), transposed=False, output_padding=(0,),
groups=64, bias=None)
assert_size_stride(buf5, (1, 64, 4), (256, 4, 1))
del buf3
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf2
del buf5
return buf6,
class DeltaNew(nn.Module):
def __init__(self, order=2, **kwargs):
super(DeltaNew, self).__init__()
self.order = order
self.compute_delta = transforms.ComputeDeltas(**kwargs)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
albertvillanova/s3prl | Delta | false | 6,166 | ["MIT"] | 1 | b127ade4ed2f80a1027901bbd2f204b4fb1aaf03 | https://github.com/albertvillanova/s3prl/tree/b127ade4ed2f80a1027901bbd2f204b4fb1aaf03 |
SAP
|
import torch
import torch.nn as nn
class SelfAttentionPooling(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPooling, self).__init__()
self.W = nn.Linear(input_dim, 1)
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (N, T, 1)
return:
utter_rep: size (N, H)
"""
batch_rep.shape[1]
att_logits = self.W(batch_rep).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep
class SAP(nn.Module):
""" Self Attention Pooling module incoporate attention mask"""
def __init__(self, out_dim):
super(SAP, self).__init__()
self.act_fn = nn.Tanh()
self.sap_layer = SelfAttentionPooling(out_dim)
def forward(self, feature, att_mask):
"""
Arguments
feature - [BxTxD] Acoustic feature with shape
att_mask - [BxTx1] Attention Mask logits
"""
feature = self.act_fn(feature)
sap_vec = self.sap_layer(feature, att_mask)
return sap_vec
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_dim': 4}]
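# Usage sketch (my addition) matching the (N, T, H) contract in the docstring;
# a zero mask leaves the attention logits unchanged.
sap = SAP(out_dim=8)
feature = torch.rand(2, 5, 8)   # (N, T, H)
att_mask = torch.zeros(2, 5)    # additive logit mask
print(sap(feature, att_mask).shape)  # torch.Size([2, 8])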
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 64
x3 = xindex // 64
x5 = xindex // 4 % 16
x2 = xindex // 16 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp8
tmp13 = tmp11 + tmp12
tmp15 = tmp13 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp18 = tmp16 / tmp17
tmp19 = tmp10 * tmp18
tmp20 = tmp9 + tmp19
tmp24 = tmp22 + tmp23
tmp26 = tmp24 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp29 = tmp27 / tmp28
tmp30 = tmp21 * tmp29
tmp31 = tmp20 + tmp30
tmp35 = tmp33 + tmp34
tmp37 = tmp35 - tmp36
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = tmp32 * tmp40
tmp42 = tmp31 + tmp41
tl.store(out_ptr0 + x7, tmp42, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_1[grid(64)](primals_4, buf2, buf3,
buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_4, buf2, buf3,
buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del buf4
return buf5, primals_4, buf0, buf2
class SelfAttentionPooling(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPooling, self).__init__()
self.W = nn.Linear(input_dim, 1)
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (N, T, 1)
return:
utter_rep: size (N, H)
"""
batch_rep.shape[1]
att_logits = self.W(batch_rep).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep
class SAPNew(nn.Module):
""" Self Attention Pooling module incoporate attention mask"""
def __init__(self, out_dim):
super(SAPNew, self).__init__()
self.act_fn = nn.Tanh()
self.sap_layer = SelfAttentionPooling(out_dim)
def forward(self, input_0, input_1):
primals_2 = self.sap_layer.W.weight
primals_3 = self.sap_layer.W.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
albertvillanova/s3prl | SAP | false | 6,167 | ["MIT"] | 1 | b127ade4ed2f80a1027901bbd2f204b4fb1aaf03 | https://github.com/albertvillanova/s3prl/tree/b127ade4ed2f80a1027901bbd2f204b4fb1aaf03 |
LargeMarginCosLoss
|
import torch
from torch import nn
def cosine_sim(x1, x2, dim=1, eps=1e-08):
ip = torch.mm(x1, x2.t())
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return ip / torch.ger(w1, w2).clamp(min=eps)
class LargeMarginCosLoss(nn.Module):
"""
CosFace: Large Margin Cosine Loss for Deep Face Recognition. CVPR2018
"""
def __init__(self, feature_size, class_num, s=7.0, m=0.2):
super(LargeMarginCosLoss, self).__init__()
self.feature_size = feature_size
self.class_num = class_num
self.s = s
self.m = m
self.weight = nn.Parameter(torch.randn(class_num, feature_size))
nn.init.xavier_uniform_(self.weight)
def forward(self, x, label):
cosine = cosine_sim(x, self.weight)
one_hot = torch.zeros_like(cosine)
one_hot.scatter_(1, label.view(-1, 1), 1.0)
output = self.s * (cosine - one_hot * self.m)
return output, cosine
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'feature_size': 4, 'class_num': 4}]
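# Usage sketch (my addition): the module returns the margined, scaled logits
# and the raw cosines; applying cross-entropy afterwards is the usual CosFace
# objective, though the module itself does not do so.
crit = LargeMarginCosLoss(feature_size=4, class_num=4)
x = torch.rand(4, 4)
label = torch.tensor([0, 1, 2, 3])
output, cosine = crit(x, label)
loss = nn.functional.cross_entropy(output, label)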
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp14 = tmp13 * tmp13
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tmp12 * tmp24
tmp26 = 1e-08
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp0 / tmp27
tmp30 = x0
tmp31 = tmp29 == tmp30
tmp32 = 1.0
tmp33 = 0.0
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = 0.2
tmp36 = tmp34 * tmp35
tmp37 = tmp28 - tmp36
tmp38 = 7.0
tmp39 = tmp37 * tmp38
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp39, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0[grid
(16)](buf0, primals_2, primals_1, primals_3, buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, buf1, primals_1, primals_2, buf0
def cosine_sim(x1, x2, dim=1, eps=1e-08):
ip = torch.mm(x1, x2.t())
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return ip / torch.ger(w1, w2).clamp(min=eps)
class LargeMarginCosLossNew(nn.Module):
"""
CosFace: Large Margin Cosine Loss for Deep Face Recognition. CVPR2018
"""
def __init__(self, feature_size, class_num, s=7.0, m=0.2):
super(LargeMarginCosLossNew, self).__init__()
self.feature_size = feature_size
self.class_num = class_num
self.s = s
self.m = m
self.weight = nn.Parameter(torch.randn(class_num, feature_size))
nn.init.xavier_uniform_(self.weight)
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
alexalex222/classification_loss | LargeMarginCosLoss | false | 6,168 | ["MIT"] | 1 | a61617e0c0d5ecf6e0ff388305dd9f3eaa5cbf94 | https://github.com/alexalex222/classification_loss/tree/a61617e0c0d5ecf6e0ff388305dd9f3eaa5cbf94 |
ParallelAttention
|
import torch
import torch.nn as nn
class ParallelAttention(nn.Module):
def __init__(self, embedding_size, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.ques_linear = nn.Linear(self.embedding_size, self.hidden_size)
self.img_linear = nn.Linear(self.embedding_size, self.hidden_size)
self.ques_linear_t = nn.Linear(self.hidden_size, 1)
self.img_linear_t = nn.Linear(self.hidden_size, 1)
self.affinity = nn.Linear(self.embedding_size, self.hidden_size,
bias=False)
self.activation = nn.Tanh()
self.softmax = nn.Softmax(dim=-1)
def forward(self, ques_embed_t, img_embed):
ques_embed = ques_embed_t.permute(0, 2, 1)
img_embed_t = img_embed.permute(0, 2, 1)
C = torch.matmul(self.affinity(ques_embed_t), img_embed)
C = self.activation(C)
C_t = C.permute(0, 2, 1)
a = self.img_linear(img_embed_t)
b = self.ques_linear(ques_embed_t)
h_vis = a + torch.matmul(C_t, b)
h_vis = self.activation(h_vis)
h_ques = b + torch.matmul(C, a)
h_ques = self.activation(h_ques)
attention_vis = self.img_linear_t(h_vis).squeeze()
attention_ques = self.ques_linear_t(h_ques).squeeze()
attention_vis = self.softmax(attention_vis)
attention_ques = self.softmax(attention_ques)
attention_vis = attention_vis.unsqueeze(1)
attention_ques = attention_ques.unsqueeze(1)
attention_feat_vis = torch.mul(img_embed, attention_vis)
attention_feat_vis = torch.sum(attention_feat_vis, dim=-1)
attention_feat_ques = torch.mul(ques_embed, attention_ques)
attention_feat_ques = torch.sum(attention_feat_ques, dim=-1)
return attention_feat_vis, attention_feat_ques
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_size': 4, 'hidden_size': 4}]
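# Shape sketch (my addition; all dims equal as in get_inputs): the first input
# is the transposed question embedding (N, T_q, E), the second the image
# embedding (N, E, T_v).
pa = ParallelAttention(embedding_size=4, hidden_size=4)
ques_embed_t = torch.rand(4, 4, 4)
img_embed = torch.rand(4, 4, 4)
vis_feat, ques_feat = pa(ques_embed_t, img_embed)
print(vis_feat.shape, ques_feat.shape)  # torch.Size([4, 4]) each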
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1, 4), (4, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf1)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(64)](buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
del primals_4
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf5)
del primals_6
del primals_7
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4),
0), reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_tanh_2[grid(64)](buf7, buf4, primals_5, buf8,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf9 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf2, buf8, out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_add_tanh_3[grid(64)](buf10, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_9
buf14 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_11
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf12, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf14, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(16)](buf15, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf18 = buf15
del buf15
triton_poi_fused_mul_sum_6[grid(16)](primals_2, buf17, buf18, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf19 = buf17
del buf17
triton_poi_fused__softmax_5[grid(16)](buf16, buf19, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf20 = buf16
del buf16
triton_poi_fused_mul_sum_7[grid(16)](primals_1, buf19, buf20, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf19
return buf18, buf20, primals_1, primals_2, buf2, reinterpret_tensor(buf3,
(16, 4), (4, 1), 0
), buf7, buf10, buf12, buf14, primals_10, primals_8, reinterpret_tensor(
buf8, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 4),
(16, 1, 4), 0)
class ParallelAttentionNew(nn.Module):
def __init__(self, embedding_size, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.ques_linear = nn.Linear(self.embedding_size, self.hidden_size)
self.img_linear = nn.Linear(self.embedding_size, self.hidden_size)
self.ques_linear_t = nn.Linear(self.hidden_size, 1)
self.img_linear_t = nn.Linear(self.hidden_size, 1)
self.affinity = nn.Linear(self.embedding_size, self.hidden_size,
bias=False)
self.activation = nn.Tanh()
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0, input_1):
primals_3 = self.ques_linear.weight
primals_5 = self.ques_linear.bias
primals_4 = self.img_linear.weight
primals_7 = self.img_linear.bias
primals_8 = self.ques_linear_t.weight
primals_9 = self.ques_linear_t.bias
primals_10 = self.img_linear_t.weight
primals_11 = self.img_linear_t.bias
primals_6 = self.affinity.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
alasin/vqa_pytorch | ParallelAttention | false | 6,169 | ["MIT"] | 1 | 8a311226d8eea56ef79f6be3c864ec05768e2895 | https://github.com/alasin/vqa_pytorch/tree/8a311226d8eea56ef79f6be3c864ec05768e2895 |
SpatialAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=
kernel_size, padding=padding, bias=False)
def forward(self, x):
max_out, _ = torch.max(x, dim=1, keepdim=True)
avg_out = torch.mean(x, dim=1, keepdim=True)
x = torch.cat([max_out, avg_out], dim=1)
x = self.conv1(x)
out = F.sigmoid(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
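# Usage sketch (my addition): the module returns an (N, 1, H, W) gate in (0, 1);
# multiplying it back onto the input is the usual CBAM-style application,
# which is an assumption here, not part of the module.
sa = SpatialAttention(kernel_size=7)
x = torch.rand(2, 16, 32, 32)
attn = sa(x)      # (2, 1, 32, 32)
out = x * attn    # broadcast the spatial gate over all channels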
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_sigmoid_1[grid(64)](buf2, 64, XBLOCK=64, num_warps
=1, num_stages=1)
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttentionNew, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=
kernel_size, padding=padding, bias=False)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
alexchungio/Scene-Classification-Competition | SpatialAttention | false | 6,170 | ["Apache-2.0"] | 1 | d936667ceba1c0b8f90eb266019f43ff27767534 | https://github.com/alexchungio/Scene-Classification-Competition/tree/d936667ceba1c0b8f90eb266019f43ff27767534 |
ScaledL2Norm
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledL2Norm(nn.Module):
def __init__(self, in_channels, initial_scale):
super(ScaledL2Norm, self).__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def forward(self, x):
return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2
).unsqueeze(3)
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'initial_scale': 1.0}]
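# Property-check sketch (my addition): each spatial position is L2-normalized
# across channels and then rescaled per channel, so with initial_scale=1.0 the
# per-position channel norm is ~1 everywhere.
norm = ScaledL2Norm(in_channels=4, initial_scale=1.0)
x = torch.rand(2, 4, 8, 8)
y = norm(x)
print(y.pow(2).sum(dim=1).sqrt().allclose(torch.ones(2, 8, 8)))  # True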
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class ScaledL2NormNew(nn.Module):
def __init__(self, in_channels, initial_scale):
super(ScaledL2NormNew, self).__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
def forward(self, input_0):
primals_2 = self.scale
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
alejodosr/adaptive-inattention | ScaledL2Norm | false | 6,171 | ["MIT"] | 1 | ad1c883081e5248704be5ce5c4baa24b2eda1c59 | https://github.com/alejodosr/adaptive-inattention/tree/ad1c883081e5248704be5ce5c4baa24b2eda1c59 |
BottleneckLSTMCell
|
import logging
import torch
import torch.nn as nn
from torch.autograd import Variable
class BottleneckLSTMCell(nn.Module):
""" Creates a LSTM layer cell
Arguments:
input_channels : variable used to contain value of number of channels in input
hidden_channels : variable used to contain value of number of channels in the hidden state of LSTM cell
"""
def __init__(self, input_channels, hidden_channels, device):
super(BottleneckLSTMCell, self).__init__()
assert hidden_channels % 2 == 0
self.device = device
self.input_channels = int(input_channels)
self.hidden_channels = int(hidden_channels)
self.num_features = 4
self.W = nn.Conv2d(in_channels=self.input_channels, out_channels=
self.input_channels, kernel_size=3, groups=self.input_channels,
stride=1, padding=1)
self.Wy = nn.Conv2d(int(self.input_channels + self.hidden_channels),
self.hidden_channels, kernel_size=1)
self.Wi = nn.Conv2d(self.hidden_channels, self.hidden_channels, 3,
1, 1, groups=self.hidden_channels, bias=False)
self.Wbi = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.Wbf = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.Wbc = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.Wbo = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.relu = nn.ReLU6()
logging.info('Initializing weights of lstm')
self._initialize_weights()
def _initialize_weights(self):
"""
Returns:
initialized weights of the model
"""
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x, h, c):
"""
Arguments:
x : input tensor
h : hidden state tensor
c : cell state tensor
Returns:
output tensor after LSTM cell
"""
x = self.W(x)
y = torch.cat((x, h), 1)
i = self.Wy(y)
b = self.Wi(i)
ci = torch.sigmoid(self.Wbi(b))
cf = torch.sigmoid(self.Wbf(b))
cc = cf * c + ci * self.relu(self.Wbc(b))
co = torch.sigmoid(self.Wbo(b))
ch = co * self.relu(cc)
return ch, cc
def init_hidden(self, batch_size, hidden, shape):
"""
Arguments:
batch_size : an int variable having value of batch size while training
hidden : an int variable having value of number of channels in hidden state
shape : an array containing shape of the hidden and cell state
device : CPU or GPU id string device
Returns:
cell state and hidden state
"""
if str(self.device) == 'cpu':
return Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
), Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
)
else:
return Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
), Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4, 'device': 0}]
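# Single-step usage sketch (my addition; device='cpu' just exercises init_hidden).
cell = BottleneckLSTMCell(input_channels=4, hidden_channels=4, device='cpu')
h, c = cell.init_hidden(batch_size=2, hidden=4, shape=(8, 8))
x = torch.rand(2, 4, 8, 8)
h, c = cell(x, h, c)   # next hidden and cell states, both (2, 4, 8, 8)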
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import logging
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 &
xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_hardtanh_mul_sigmoid_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask)
tmp13 = tl.load(in_ptr4 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = 6.0
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = tmp5 * tmp10
tmp12 = tmp3 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = triton_helpers.maximum(tmp12, tmp7)
tmp16 = triton_helpers.minimum(tmp15, tmp9)
tmp17 = tmp14 * tmp16
tl.store(out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr1 + x0, tmp17, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_11, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1,
512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(256)](buf3, primals_6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = extern_kernels.convolution(buf4, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = extern_kernels.convolution(buf4, primals_11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = extern_kernels.convolution(buf4, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf0
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_hardtanh_mul_sigmoid_2[grid(256)](buf6,
primals_10, buf5, buf7, buf9, buf8, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return (buf10, buf8, primals_1, primals_3, primals_5, primals_7,
primals_8, primals_9, primals_10, primals_11, primals_12, buf1,
buf3, buf4, buf5, buf6, buf7, buf8, buf9)
class BottleneckLSTMCellNew(nn.Module):
""" Creates a LSTM layer cell
Arguments:
input_channels : variable used to contain value of number of channels in input
hidden_channels : variable used to contain value of number of channels in the hidden state of LSTM cell
"""
def __init__(self, input_channels, hidden_channels, device):
super(BottleneckLSTMCellNew, self).__init__()
assert hidden_channels % 2 == 0
self.device = device
self.input_channels = int(input_channels)
self.hidden_channels = int(hidden_channels)
self.num_features = 4
self.W = nn.Conv2d(in_channels=self.input_channels, out_channels=
self.input_channels, kernel_size=3, groups=self.input_channels,
stride=1, padding=1)
self.Wy = nn.Conv2d(int(self.input_channels + self.hidden_channels),
self.hidden_channels, kernel_size=1)
self.Wi = nn.Conv2d(self.hidden_channels, self.hidden_channels, 3,
1, 1, groups=self.hidden_channels, bias=False)
self.Wbi = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.Wbf = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.Wbc = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.Wbo = nn.Conv2d(self.hidden_channels, self.hidden_channels, 1,
1, 0, bias=False)
self.relu = nn.ReLU6()
logging.info('Initializing weights of lstm')
self._initialize_weights()
def _initialize_weights(self):
"""
Returns:
initialized weights of the model
"""
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def init_hidden(self, batch_size, hidden, shape):
"""
Arguments:
batch_size : an int variable having value of batch size while training
hidden : an int variable having value of number of channels in hidden state
shape : an array containing shape of the hidden and cell state
device : CPU or GPU id string device
Returns:
cell state and hidden state
"""
if str(self.device) == 'cpu':
return Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
), Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
)
else:
return Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
), Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])
)
def forward(self, input_0, input_1, input_2):
primals_1 = self.W.weight
primals_2 = self.W.bias
primals_5 = self.Wy.weight
primals_6 = self.Wy.bias
primals_7 = self.Wi.weight
primals_8 = self.Wbi.weight
primals_9 = self.Wbf.weight
primals_11 = self.Wbc.weight
primals_12 = self.Wbo.weight
primals_3 = input_0
primals_4 = input_1
primals_10 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1]
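# Illustrative usage sketch (not part of the source repo): one step of the
# compiled cell. The argument order (x, h, c) and the (4, 4, 4, 4) shapes are
# assumptions read off the size asserts above; the Triton kernels need CUDA.
if __name__ == '__main__':
    cell = BottleneckLSTMCellNew(input_channels=4, hidden_channels=4,
        device=torch.device('cuda')).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    h, c = cell.init_hidden(batch_size=4, hidden=4, shape=(4, 4))
    h_new, c_new = cell(x, h, c)
    print(h_new.shape, c_new.shape)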
|
alejodosr/adaptive-inattention
|
BottleneckLSTMCell
| false
| 6,172
|
[
"MIT"
] | 1
|
ad1c883081e5248704be5ce5c4baa24b2eda1c59
|
https://github.com/alejodosr/adaptive-inattention/tree/ad1c883081e5248704be5ce5c4baa24b2eda1c59
|
AndMLP
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AndMLP(nn.Module):
def __init__(self, n_layers, entity_dim):
super(AndMLP, self).__init__()
self.n_layers = n_layers
self.layers = []
for i in range(1, self.n_layers + 1):
setattr(self, 'and_layer_{}'.format(i), nn.Linear(2 *
entity_dim, 2 * entity_dim))
self.last_layer = nn.Linear(2 * entity_dim, entity_dim)
def forward(self, x1, x2):
x = torch.cat((x1, x2), dim=-1)
for i in range(1, self.n_layers + 1):
x = F.relu(getattr(self, 'and_layer_{}'.format(i))(x))
x = self.last_layer(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1, 'entity_dim': 4}]
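# A more idiomatic formulation of the same network using nn.ModuleList; the
# class name AndMLPList is hypothetical and only illustrates the pattern the
# setattr/getattr calls above implement by hand.
class AndMLPList(nn.Module):
    def __init__(self, n_layers, entity_dim):
        super().__init__()
        self.hidden = nn.ModuleList(nn.Linear(2 * entity_dim, 2 *
            entity_dim) for _ in range(n_layers))
        self.last_layer = nn.Linear(2 * entity_dim, entity_dim)
    def forward(self, x1, x2):
        x = torch.cat((x1, x2), dim=-1)
        for layer in self.hidden:
            x = F.relu(layer(x))
        return self.last_layer(x)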
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 8), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf2,
primals_4, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(
buf2, (64, 8), (8, 1), 0), primals_5, buf4
class AndMLPNew(nn.Module):
def __init__(self, n_layers, entity_dim):
super(AndMLPNew, self).__init__()
self.n_layers = n_layers
self.layers = []
for i in range(1, self.n_layers + 1):
setattr(self, 'and_layer_{}'.format(i), nn.Linear(2 *
entity_dim, 2 * entity_dim))
self.last_layer = nn.Linear(2 * entity_dim, entity_dim)
def forward(self, input_0, input_1):
primals_3 = self.and_layer_1.weight
primals_4 = self.and_layer_1.bias
primals_5 = self.last_layer.weight
primals_6 = self.last_layer.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
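# Hypothetical parity check between the eager module and the compiled one
# (illustrative; requires a CUDA device and assumes AndMLP from the listing
# above is in scope alongside AndMLPNew).
if __name__ == '__main__':
    eager = AndMLP(n_layers=1, entity_dim=4).cuda()
    compiled = AndMLPNew(n_layers=1, entity_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    x2 = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x1, x2), compiled(x1, x2), atol=1e-5)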
|
amayuelas/NNKGReasoning
|
AndMLP
| false
| 6,173
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
dce_loss
|
import torch
from torch import nn
class dce_loss(nn.Module):
def __init__(self, n_classes, feat_dim, init_weight=True):
super(dce_loss, self).__init__()
self.n_classes = n_classes
self.feat_dim = feat_dim
self.centers = nn.Parameter(torch.randn(self.feat_dim, self.
n_classes), requires_grad=True)
nn.init.kaiming_normal_(self.centers)
def forward(self, x):
features_square = torch.sum(torch.pow(x, 2), 1, keepdim=True)
centers_square = torch.sum(torch.pow(self.centers, 2), 0, keepdim=True)
features_into_centers = 2 * torch.matmul(x, self.centers)
dist = features_square + centers_square - features_into_centers
return self.centers, -dist
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_classes': 4, 'feat_dim': 4}]
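# Quick sanity sketch (illustrative, not from the repo): the forward pass
# expands ||x - c||^2 as ||x||^2 + ||c||^2 - 2<x, c>; comparing the expansion
# against torch.cdist on 2-D data confirms the algebra.
if __name__ == '__main__':
    x = torch.rand(8, 4)
    centers = torch.randn(4, 10)
    features_square = torch.sum(x ** 2, dim=1, keepdim=True)
    centers_square = torch.sum(centers ** 2, dim=0, keepdim=True)
    dist = features_square + centers_square - 2 * x @ centers
    assert torch.allclose(dist, torch.cdist(x, centers.t()) ** 2, atol=1e-4)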
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tl.store(out_ptr0 + x5, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_mul_neg_pow_sub_sum_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 - tmp3
tmp5 = -tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sum_0[grid(64)](primals_1, primals_2, buf1,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_neg_pow_sub_sum_1[grid(256)](buf2, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
return buf2, primals_2, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class dce_lossNew(nn.Module):
def __init__(self, n_classes, feat_dim, init_weight=True):
super(dce_lossNew, self).__init__()
self.n_classes = n_classes
self.feat_dim = feat_dim
self.centers = nn.Parameter(torch.randn(self.feat_dim, self.
n_classes), requires_grad=True)
nn.init.kaiming_normal_(self.centers)
def forward(self, input_0):
primals_2 = self.centers
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
alexalex222/classification_loss
|
dce_loss
| false
| 6,174
|
[
"MIT"
] | 1
|
a61617e0c0d5ecf6e0ff388305dd9f3eaa5cbf94
|
https://github.com/alexalex222/classification_loss/tree/a61617e0c0d5ecf6e0ff388305dd9f3eaa5cbf94
|
Boom
|
import torch
import torch.nn as nn
class Boom(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.1, shortcut
=False, output_size=512):
super(Boom, self).__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout) if dropout else None
if not shortcut:
self.linear2 = nn.Linear(dim_feedforward, output_size)
self.shortcut = shortcut
self.act = nn.GELU()
def forward(self, input):
x = self.act(self.linear1(input))
if self.dropout:
x = self.dropout(x)
if self.shortcut:
ninp = input.shape[-1]
x = torch.narrow(x, -1, 0, x.shape[-1] // ninp * ninp)
x = x.view(*x.shape[:-1], x.shape[-1] // ninp, ninp)
z = x.sum(dim=-2)
else:
z = self.linear2(x)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
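# Illustrative sketch (standalone, not repo code) of the shortcut branch:
# the wide feedforward output is folded back to the input width ninp by
# summing contiguous chunks of size ninp along the last dimension.
if __name__ == '__main__':
    ninp = 4
    x = torch.rand(2, 3, 2048)
    x = torch.narrow(x, -1, 0, x.shape[-1] // ninp * ninp)
    x = x.view(*x.shape[:-1], x.shape[-1] // ninp, ninp)
    z = x.sum(dim=-2)
    assert z.shape == (2, 3, ninp)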
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2048, 4), (4, 1))
assert_size_stride(primals_2, (2048,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 2048), (2048, 1))
assert_size_stride(primals_5, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2048), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(131072)](buf0, buf1, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_4, (2048, 512), (1,
2048), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), primals_4
class BoomNew(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.1, shortcut
=False, output_size=512):
super(BoomNew, self).__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout) if dropout else None
if not shortcut:
self.linear2 = nn.Linear(dim_feedforward, output_size)
self.shortcut = shortcut
self.act = nn.GELU()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
alisafaya/char-rnn.pytorch
|
Boom
| false
| 6,175
|
[
"MIT"
] | 1
|
473538d9f4d57a3206dccef22f7e03826c398cfb
|
https://github.com/alisafaya/char-rnn.pytorch/tree/473538d9f4d57a3206dccef22f7e03826c398cfb
|
CenterLoss
|
import torch
from torch import nn
class CenterLoss(nn.Module):
"""Center loss.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
num_classes (int): number of classes.
feat_dim (int): feature dimension.
"""
def __init__(self, num_classes=10, feat_dim=2):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
        self.centers = nn.Parameter(torch.randn(self.num_classes, self.
            feat_dim))
def forward_old(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim).
labels: ground truth labels with shape (batch_size).
"""
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size,
self.num_classes) + torch.pow(self.centers, 2).sum(dim=1,
keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_classes).long()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
dist = distmat * mask.float()
loss = dist.clamp(min=1e-12, max=1000000000000.0).sum(
) / batch_size / self.feat_dim
return loss
def forward(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim).
labels: ground truth labels with shape (batch_size).
"""
batch_size = x.size(0)
batch_centers = self.centers[labels]
dist = torch.pow(x - batch_centers, 2)
loss = dist.clamp(min=1e-12, max=1000000000000.0).sum(
) / batch_size / self.feat_dim
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 2]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
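# Illustrative check (not from the repo) that the rewritten forward agrees
# with forward_old on 2-D features: both compute
# sum_i ||x_i - c_{y_i}||^2 / (batch_size * feat_dim), differing only in
# where the numerical clamp is applied.
if __name__ == '__main__':
    torch.manual_seed(0)
    loss_fn = CenterLoss(num_classes=10, feat_dim=2)
    x = torch.rand(4, 2)
    labels = torch.randint(0, 10, (4,))
    assert torch.allclose(loss_fn(x, labels), loss_fn.forward_old(x,
        labels), atol=1e-5)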
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2
x0 = xindex % 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 10, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 10) | ~xmask,
'index out of bounds: 0 <= tmp4 < 10')
tmp6 = tl.load(in_ptr1 + (x0 + 2 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_per_fused_clamp_div_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 8
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-12
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 1000000000000.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = 0.25
tmp12 = tmp10 * tmp11
tmp13 = 0.5
tmp14 = tmp12 * tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 2), (32, 8, 2, 1))
assert_size_stride(primals_2, (10, 2), (2, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(8)](primals_3, primals_2, buf0, 8,
XBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_clamp_div_pow_sub_sum_1[grid(1)](buf2, primals_1,
buf0, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
return buf2, primals_1, primals_3, buf0
class CenterLossNew(nn.Module):
"""Center loss.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
num_classes (int): number of classes.
feat_dim (int): feature dimension.
"""
def __init__(self, num_classes=10, feat_dim=2):
super(CenterLossNew, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
        self.centers = nn.Parameter(torch.randn(self.num_classes, self.
            feat_dim))
def forward_old(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim).
labels: ground truth labels with shape (batch_size).
"""
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size,
self.num_classes) + torch.pow(self.centers, 2).sum(dim=1,
keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_classes).long()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
dist = distmat * mask.float()
loss = dist.clamp(min=1e-12, max=1000000000000.0).sum(
) / batch_size / self.feat_dim
return loss
def forward(self, input_0, input_1):
primals_2 = self.centers
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
alexalex222/classification_loss
|
CenterLoss
| false
| 6,176
|
[
"MIT"
] | 1
|
a61617e0c0d5ecf6e0ff388305dd9f3eaa5cbf94
|
https://github.com/alexalex222/classification_loss/tree/a61617e0c0d5ecf6e0ff388305dd9f3eaa5cbf94
|
PearsonCorrelation
|
import torch
import torch.nn as nn
class PearsonCorrelation(nn.Module):
"""
Module for measuring Pearson correlation.
Given samples (x, y), the Pearson correlation coefficient is given by:
.. math::
        r = \\frac{\\sum_{i=1}^{n} (x_i - \\overline{x})(y_i - \\overline{y})}
            {\\sqrt{\\sum_{i=1}^{n} (x_i - \\overline{x})^2 (y_i - \\overline{y})^2}}
"""
def __init__(self) ->None:
super(PearsonCorrelation, self).__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
"""
Override the forward function in nn.Modules. Performs Pearson
Correlation calculation between ``score`` and ``mos``
Parameters
----------
x : torch.Tensor
First input vector.
y : torch.Tensor
Second input vector.
TODO: dimensions of the input data have to agree, have to be batch wise
and check for NaN values in the result.
"""
x_n = x - torch.mean(x)
y_n = y - torch.mean(y)
x_n_norm = torch.sqrt(torch.mean(x_n ** 2))
y_n_norm = torch.sqrt(torch.mean(y_n ** 2))
normaliser = x_n_norm * y_n_norm
return -torch.mean(x_n * y_n.squeeze(), dim=0) / normaliser
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
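# Illustrative sanity check (not from the repo): on 1-D inputs the module
# returns the negative Pearson coefficient, so it matches -torch.corrcoef
# (torch.corrcoef is assumed available, i.e. PyTorch >= 1.10).
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.rand(256)
    y = 0.5 * x + 0.1 * torch.rand(256)
    r_neg = PearsonCorrelation()(x, y)
    r_ref = torch.corrcoef(torch.stack([x, y]))[0, 1]
    assert torch.allclose(r_neg, -r_ref, atol=1e-5)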
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = tmp0 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None)
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp10, None)
@triton.jit
def triton_poi_fused_div_mean_mul_neg_pow_sqrt_squeeze_sub_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp7 = tl.load(in_ptr3 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp14 = tl.load(in_ptr2 + (64 + x0), xmask)
tmp18 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp20 = tl.load(in_ptr2 + (128 + x0), xmask)
tmp24 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp26 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp33 = tl.load(in_ptr4 + 0)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp37 = tl.load(in_ptr5 + 0)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK])
tmp3 = 256.0
tmp4 = tmp2 / tmp3
tmp5 = tmp0 - tmp4
tmp9 = tmp8 / tmp3
tmp10 = tmp6 - tmp9
tmp11 = tmp5 * tmp10
tmp13 = tmp12 - tmp4
tmp15 = tmp14 - tmp9
tmp16 = tmp13 * tmp15
tmp17 = tmp11 + tmp16
tmp19 = tmp18 - tmp4
tmp21 = tmp20 - tmp9
tmp22 = tmp19 * tmp21
tmp23 = tmp17 + tmp22
tmp25 = tmp24 - tmp4
tmp27 = tmp26 - tmp9
tmp28 = tmp25 * tmp27
tmp29 = tmp23 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = -tmp31
tmp35 = tmp34 / tmp3
tmp36 = libdevice.sqrt(tmp35)
tmp39 = tmp38 / tmp3
tmp40 = libdevice.sqrt(tmp39)
tmp41 = tmp36 * tmp40
tmp42 = tmp32 / tmp41
tl.store(in_out_ptr0 + x0, tmp42, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_pow_sub_0[grid(1)](arg0_1, buf0, buf3, 1, 256,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_mean_pow_sub_0[grid(1)](arg1_1, buf1, buf4, 1, 256,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf2
del buf2
triton_poi_fused_div_mean_mul_neg_pow_sqrt_squeeze_sub_1[grid(64)](buf5
, arg0_1, buf0, arg1_1, buf1, buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del buf0
del buf1
del buf3
del buf4
return buf5,
class PearsonCorrelationNew(nn.Module):
"""
Module for measuring Pearson correlation.
Given samples (x, y), the Pearson correlation coefficient is given by:
.. math::
        r = \\frac{\\sum_{i=1}^{n} (x_i - \\overline{x})(y_i - \\overline{y})}
            {\\sqrt{\\sum_{i=1}^{n} (x_i - \\overline{x})^2 (y_i - \\overline{y})^2}}
"""
def __init__(self) ->None:
super(PearsonCorrelationNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
alexhepburn/expert
|
PearsonCorrelation
| false
| 6,177
|
[
"BSD-3-Clause"
] | 1
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
https://github.com/alexhepburn/expert/tree/546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
ThreeLayerCNN
|
import torch
import torch.utils.data
class ThreeLayerCNN(torch.nn.Module):
"""
    Input: 64x64 face image (eye aligned).
Output: 1-D tensor with 2 elements. Used for binary classification.
Parameters:
Number of conv layers: 3
Number of fully connected layers: 2
"""
def __init__(self):
super(ThreeLayerCNN, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.conv3 = torch.nn.Conv2d(16, 16, 6)
self.fc1 = torch.nn.Linear(16 * 4 * 4, 120)
self.fc2 = torch.nn.Linear(120, 2)
def forward(self, x):
x = self.pool(torch.nn.functional.relu(self.conv1(x)))
x = self.pool(torch.nn.functional.relu(self.conv2(x)))
x = self.pool(torch.nn.functional.relu(self.conv3(x)))
x = x.view(-1, 16 * 4 * 4)
x = torch.nn.functional.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
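# Illustrative shape trace (not from the repo) showing why fc1 expects
# 16 * 4 * 4 = 256 features for a 64x64 input.
if __name__ == '__main__':
    model = ThreeLayerCNN()
    x = torch.rand(1, 3, 64, 64)
    x = model.pool(torch.relu(model.conv1(x)))   # (1, 6, 30, 30)
    x = model.pool(torch.relu(model.conv2(x)))   # (1, 16, 13, 13)
    x = model.pool(torch.relu(model.conv3(x)))   # (1, 16, 4, 4)
    assert x.shape == (1, 16, 4, 4)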
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 86400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 6
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 21600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x4 = xindex // 900
x3 = xindex // 5400
x5 = xindex % 5400
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 5408 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 5504 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = xindex // 13
x2 = xindex // 2704
x4 = xindex % 2704
tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 2720 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 2816 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 6, 6), (576, 36, 6, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (120, 256), (256, 1))
assert_size_stride(primals_9, (120,), (1,))
assert_size_stride(primals_10, (2, 120), (120, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2,
buf1, 86400, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 30, 30), (5408, 900, 30, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2,
buf3, 21600, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 26, 26), (10816, 676, 26, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(43264)](buf5, primals_5,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 13, 13), (2720, 169, 13, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 16, 13, 13), (2816, 169, 13, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(10816)](buf5, buf6,
buf7, 10816, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 8, 8), (1024, 64, 8, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(4096)](buf9, primals_7,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.int8)
buf11 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(1024)](buf9, buf10,
buf11, 1024, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (4, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 120), (1, 256), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(480)](buf13, primals_9, 480, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(
primals_10, (120, 2), (1, 120), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (4,
256), (256, 1), 0), buf13, primals_10, primals_8)
class ThreeLayerCNNNew(torch.nn.Module):
"""
    Input: 64x64 face image (eye aligned).
Output: 1-D tensor with 2 elements. Used for binary classification.
Parameters:
Number of conv layers: 3
Number of fully connected layers: 2
"""
def __init__(self):
super(ThreeLayerCNNNew, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.conv3 = torch.nn.Conv2d(16, 16, 6)
self.fc1 = torch.nn.Linear(16 * 4 * 4, 120)
self.fc2 = torch.nn.Linear(120, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
aleb/pipelines
|
ThreeLayerCNN
| false
| 6,178
|
[
"Apache-2.0"
] | 1
|
2181b2fb8bdd6cd93e7d677b9840ed1b58a83a85
|
https://github.com/aleb/pipelines/tree/2181b2fb8bdd6cd93e7d677b9840ed1b58a83a85
|
BCELovaszLoss
|
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import filterfalse as ifilterfalse
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
"""
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(np.isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(
0), lab.unsqueeze(0), ignore)) for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)
)
return loss
class BCELovaszLoss(nn.Module):
def __init__(self, bce_weight):
super().__init__()
self.bce_weight = bce_weight
self.bce_loss = nn.BCEWithLogitsLoss()
def forward(self, logits, targets, weight=None):
self.bce_loss.weight = weight
return self.bce_weight * self.bce_loss(logits, targets) + (1 - self
.bce_weight) * lovasz_hinge(logits, targets, per_image=False)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'bce_weight': 4}]
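# Tiny worked example (illustrative) of lovasz_grad: for errors sorted in
# descending order with ground truth [1, 1, 0, 1], intersection over union
# gives jaccard = [1/3, 2/3, 3/4, 1], whose first differences are returned.
if __name__ == '__main__':
    gt_sorted = torch.tensor([1, 1, 0, 1])
    print(lovasz_grad(gt_sorted))   # tensor([0.3333, 0.3333, 0.0833, 0.2500])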
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import filterfalse as ifilterfalse
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 4.0
tmp19 = tmp17 * tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mul_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
"""
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(np.isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(
0), lab.unsqueeze(0), ignore)) for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)
)
return loss
class BCELovaszLossNew(nn.Module):
def __init__(self, bce_weight):
super().__init__()
self.bce_weight = bce_weight
self.bce_loss = nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
amitkumarj441/TGS_Kaggle
|
BCELovaszLoss
| false
| 6,179
|
[
"MIT"
] | 1
|
a4f613046cc36f3f6dbec28adb35f97a63c2a994
|
https://github.com/amitkumarj441/TGS_Kaggle/tree/a4f613046cc36f3f6dbec28adb35f97a63c2a994
|
TorchGloVeLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class TorchGloVeLoss(nn.Module):
def __init__(self):
super().__init__()
self.reduction = 'sum'
def forward(self, diffs, weights):
return torch.sum(0.5 * torch.mul(weights, diffs ** 2))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
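# Illustrative check (not from the repo) of the weighted least-squares form
# of the GloVe objective, L = sum_ij 0.5 * f(X_ij) * d_ij^2, where diffs
# holds the per-pair residuals and weights the f(X_ij) terms.
if __name__ == '__main__':
    loss_fn = TorchGloVeLoss()
    diffs = torch.rand(4, 4, 4, 4)
    weights = torch.rand(4, 4, 4, 4)
    assert torch.isclose(loss_fn(diffs, weights), 0.5 * (weights * diffs **
        2).sum())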
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp1 * tmp1
tmp3 = tmp0 * tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_pow_sum_0[grid(1)](arg1_1, arg0_1, buf0, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class TorchGloVeLossNew(nn.Module):
def __init__(self):
super().__init__()
self.reduction = 'sum'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ammarhusain/cs224u
|
TorchGloVeLoss
| false
| 6,180
|
[
"Apache-2.0"
] | 1
|
bbdb0aaa6b7437481e2e1fab8e12bbf1996eecd1
|
https://github.com/ammarhusain/cs224u/tree/bbdb0aaa6b7437481e2e1fab8e12bbf1996eecd1
|
BoxOffsetIntersection
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BoxOffsetIntersection(nn.Module):
def __init__(self, dim):
super(BoxOffsetIntersection, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(self.dim, self.dim)
self.layer2 = nn.Linear(self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, embeddings):
layer1_act = F.relu(self.layer1(embeddings))
layer1_mean = torch.mean(layer1_act, dim=0)
gate = torch.sigmoid(self.layer2(layer1_mean))
offset, _ = torch.min(embeddings, dim=0)
return offset * gate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
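# Illustrative property check (not from the repo): the sigmoid gate lies in
# (0, 1), so for non-negative offset embeddings the intersected offset never
# exceeds the element-wise minimum over the input boxes.
if __name__ == '__main__':
    torch.manual_seed(0)
    inter = BoxOffsetIntersection(dim=4)
    embeddings = torch.rand(3, 8, 4)        # (num_boxes, batch, dim)
    out = inter(embeddings)                 # (8, 4)
    min_offset, _ = embeddings.min(dim=0)
    assert torch.all(out <= min_offset + 1e-6)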
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp9 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp13 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 4.0
tmp18 = tmp16 / tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_min_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp6 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_relu_0[grid(64)](buf0, primals_2, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_min_mul_sigmoid_1[grid(64)](primals_3, buf2, buf3,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf0,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf3, primals_3, reinterpret_tensor(buf1, (16, 4), (4, 1), 0
), buf2, primals_4, buf4
class BoxOffsetIntersectionNew(nn.Module):
def __init__(self, dim):
super(BoxOffsetIntersectionNew, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(self.dim, self.dim)
self.layer2 = nn.Linear(self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, input_0):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
amayuelas/NNKGReasoning
|
BoxOffsetIntersection
| false
| 6,181
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
HME
|
import numpy
import torch
class HME(torch.nn.Module):
def __init__(self, in_features, out_features, depth, projection='linear'):
super(HME, self).__init__()
self.proj = projection
self.depth = depth
self.in_features = in_features
self.out_features = out_features
self.n_leaf = int(2 ** depth)
self.gate_count = int(self.n_leaf - 1)
self.gw = torch.nn.Parameter(torch.nn.init.kaiming_normal_(torch.
empty(self.gate_count, in_features), nonlinearity='sigmoid').t())
self.gb = torch.nn.Parameter(torch.zeros(self.gate_count))
if self.proj == 'linear':
self.pw = torch.nn.init.kaiming_normal_(torch.empty(
out_features * self.n_leaf, in_features), nonlinearity='linear'
)
self.pw = torch.nn.Parameter(self.pw.reshape(out_features, self
.n_leaf, in_features).permute(0, 2, 1))
self.pb = torch.nn.Parameter(torch.zeros(out_features, self.n_leaf)
)
elif self.proj == 'constant':
self.z = torch.nn.Parameter(torch.randn(out_features, self.n_leaf))
def forward(self, x):
node_densities = self.node_densities(x)
leaf_probs = node_densities[:, -self.n_leaf:].t()
if self.proj == 'linear':
gated_projection = torch.matmul(self.pw, leaf_probs).permute(2,
0, 1)
gated_bias = torch.matmul(self.pb, leaf_probs).permute(1, 0)
result = torch.matmul(gated_projection, x.reshape(-1, self.
in_features, 1))[:, :, 0] + gated_bias
elif self.proj == 'constant':
result = torch.matmul(self.z, leaf_probs).permute(1, 0)
return result
def node_densities(self, x):
gatings = self.gatings(x)
node_densities = torch.ones(x.shape[0], 2 ** (self.depth + 1) - 1,
device=x.device)
it = 1
for d in range(1, self.depth + 1):
for i in range(2 ** d):
parent_index = (it + 1) // 2 - 1
child_way = (it + 1) % 2
if child_way == 0:
parent_gating = gatings[:, parent_index]
else:
parent_gating = 1 - gatings[:, parent_index]
parent_density = node_densities[:, parent_index].clone()
node_densities[:, it] = parent_density * parent_gating
it += 1
return node_densities
def gatings(self, x):
return torch.sigmoid(torch.add(torch.matmul(x, self.gw), self.gb))
def total_path_value(self, z, index, level=None):
gatings = self.gatings(z)
gateways = numpy.binary_repr(index, width=self.depth)
L = 0.0
current = 0
if level is None:
level = self.depth
for i in range(level):
if int(gateways[i]) == 0:
L += gatings[:, current].mean()
current = 2 * current + 1
else:
L += (1 - gatings[:, current]).mean()
current = 2 * current + 2
return L
def extra_repr(self):
return 'in_features=%d, out_features=%d, depth=%d, projection=%s' % (
self.in_features, self.out_features, self.depth, self.proj)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'depth': 1}]
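# Illustrative check (not from the repo): node_densities assigns each node
# the product of gatings along its root path, so the leaf densities of every
# sample form a probability distribution over the 2**depth leaves.
if __name__ == '__main__':
    torch.manual_seed(0)
    hme = HME(in_features=4, out_features=4, depth=2)
    x = torch.rand(5, 4)
    leaves = hme.node_densities(x)[:, -hme.n_leaf:]
    assert torch.allclose(leaves.sum(dim=1), torch.ones(5), atol=1e-5)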
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tmp5 * tmp4
tmp11 = tl.where(tmp9, tmp10, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 8 * (y0 // 4) + y0 % 4), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 2 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_copy_mul_ones_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp3 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp3 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp0 == tmp6
tmp11 = tmp8 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp13 = 1.0
tmp14 = tmp13 * tmp12
tmp15 = tl.where(tmp7, tmp14, tmp13)
tmp16 = tl.where(tmp2, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_out_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x1 + 4 * y0), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 4))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 2), (8, 1, 4))
assert_size_stride(primals_5, (4, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_rsub_0[grid(4)](buf0, primals_3, buf1, buf2,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(16, 2)](primals_4, buf3,
16, 2, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
triton_poi_fused_clone_copy_mul_ones_2[grid(12)](buf2, buf1, buf0,
primals_3, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(buf4, (2, 4), (1, 3), 1),
out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_5, reinterpret_tensor(buf4, (2, 4), (1, 3
), 1), out=buf6)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (1, 16, 4),
0), reinterpret_tensor(primals_2, (4, 4, 1), (4, 1, 1), 0), out
=buf7)
del buf5
buf8 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
triton_poi_fused_add_3[grid(4, 4)](buf8, buf6, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
del buf6
return buf8, primals_2, primals_3, buf0, buf1, buf2, reinterpret_tensor(
primals_5, (2, 4), (1, 2), 0), reinterpret_tensor(buf4, (4, 2), (3,
1), 1), reinterpret_tensor(buf3, (2, 16), (1, 2), 0)
class HMENew(torch.nn.Module):
def __init__(self, in_features, out_features, depth, projection='linear'):
super(HMENew, self).__init__()
self.proj = projection
self.depth = depth
self.in_features = in_features
self.out_features = out_features
self.n_leaf = int(2 ** depth)
self.gate_count = int(self.n_leaf - 1)
self.gw = torch.nn.Parameter(torch.nn.init.kaiming_normal_(torch.
empty(self.gate_count, in_features), nonlinearity='sigmoid').t())
self.gb = torch.nn.Parameter(torch.zeros(self.gate_count))
if self.proj == 'linear':
self.pw = torch.nn.init.kaiming_normal_(torch.empty(
out_features * self.n_leaf, in_features), nonlinearity='linear'
)
self.pw = torch.nn.Parameter(self.pw.reshape(out_features, self
.n_leaf, in_features).permute(0, 2, 1))
self.pb = torch.nn.Parameter(torch.zeros(out_features, self.n_leaf)
)
elif self.proj == 'constant':
self.z = torch.nn.Parameter(torch.randn(out_features, self.n_leaf))
def node_densities(self, x):
gatings = self.gatings(x)
node_densities = torch.ones(x.shape[0], 2 ** (self.depth + 1) - 1,
device=x.device)
it = 1
for d in range(1, self.depth + 1):
for i in range(2 ** d):
parent_index = (it + 1) // 2 - 1
child_way = (it + 1) % 2
if child_way == 0:
parent_gating = gatings[:, parent_index]
else:
parent_gating = 1 - gatings[:, parent_index]
parent_density = node_densities[:, parent_index].clone()
node_densities[:, it] = parent_density * parent_gating
it += 1
return node_densities
def gatings(self, x):
return torch.sigmoid(torch.add(torch.matmul(x, self.gw), self.gb))
def total_path_value(self, z, index, level=None):
gatings = self.gatings(z)
gateways = numpy.binary_repr(index, width=self.depth)
L = 0.0
current = 0
if level is None:
level = self.depth
for i in range(level):
if int(gateways[i]) == 0:
L += gatings[:, current].mean()
current = 2 * current + 1
else:
L += (1 - gatings[:, current]).mean()
current = 2 * current + 2
return L
def extra_repr(self):
return 'in_features=%d, out_features=%d, depth=%d, projection=%s' % (
self.in_features, self.out_features, self.depth, self.proj)
def forward(self, input_0):
primals_1 = self.gw
primals_3 = self.gb
primals_4 = self.pw
primals_5 = self.pb
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
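# Illustrative equivalence sketch (not from the record): the Triton-backed
# HMENew should reproduce the eager HME above once parameters are shared.
# Assumes a CUDA device is available, since call() launches CUDA kernels.
ref = HME(in_features=4, out_features=4, depth=1).cuda()
fused = HMENew(in_features=4, out_features=4, depth=1).cuda()
fused.load_state_dict(ref.state_dict())  # parameter names (gw, gb, pw, pb) match
x = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(fused(x), ref(x), rtol=1e-4, atol=1e-4)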
|
alper111/hmog
|
HME
| false
| 6,182
|
[
"MIT"
] | 1
|
556da11600c97bcb075a0f19ffc284120d9789d2
|
https://github.com/alper111/hmog/tree/556da11600c97bcb075a0f19ffc284120d9789d2
|
ME
|
import torch
class ME(torch.nn.Module):
def __init__(self, in_features, out_features, n_leaf, projection=
'linear', dropout=0.0):
super(ME, self).__init__()
self.proj = projection
self.n_leaf = n_leaf
self.in_features = in_features
self.out_features = out_features
self.gw = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.
empty(in_features, n_leaf)))
self.gb = torch.nn.Parameter(torch.zeros(n_leaf))
if self.proj == 'linear':
self.pw = torch.nn.init.kaiming_normal_(torch.empty(
out_features * n_leaf, in_features), nonlinearity='linear')
self.pw = torch.nn.Parameter(self.pw.reshape(out_features,
n_leaf, in_features).permute(0, 2, 1))
self.pb = torch.nn.Parameter(torch.zeros(out_features, n_leaf))
elif self.proj == 'constant':
self.z = torch.nn.Parameter(torch.randn(out_features, n_leaf))
def forward(self, x):
gatings = torch.softmax(torch.add(torch.matmul(x, self.gw), self.gb
), dim=1).t()
if self.proj == 'linear':
gated_projection = torch.matmul(self.pw, gatings).permute(2, 0, 1)
gated_bias = torch.matmul(self.pb, gatings).permute(1, 0)
result = torch.matmul(gated_projection, x.reshape(-1, self.
in_features, 1))[:, :, 0] + gated_bias
elif self.proj == 'constant':
result = torch.matmul(self.z, gatings).permute(1, 0)
return result
def extra_repr(self):
return 'in_features=%d, out_features=%d, n_leaf=%d, projection=%s' % (
self.in_features, self.out_features, self.n_leaf, self.proj)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'n_leaf': 4}]
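# Illustrative sketch (not part of the original record): the softmax gate
# distributes exactly one unit of mixing mass over the n_leaf experts per
# sample. `model`, `x` and `gate` are hypothetical names.
model = ME(in_features=4, out_features=4, n_leaf=4)
x = torch.rand(4, 4)
gate = torch.softmax(x @ model.gw + model.gb, dim=1)
assert torch.allclose(gate.sum(dim=1), torch.ones(4))
assert model(x).shape == (4, 4)  # (batch, out_features)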
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_out_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x1 + 4 * y0), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 1, 4))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_2, primals_1, alpha=1, beta
=1, out=buf0)
del primals_1
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](primals_4, buf3, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf4)
buf5 = buf1
del buf1
extern_kernels.mm(primals_5, reinterpret_tensor(buf2, (4, 4), (1, 4
), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (1, 16, 4),
0), reinterpret_tensor(primals_2, (4, 4, 1), (4, 1, 1), 0), out
=buf6)
del buf4
buf7 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_add_3[grid(4, 4)](buf7, buf5, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
del buf5
return buf7, primals_2, primals_5, buf2, reinterpret_tensor(buf3, (16,
4), (4, 1), 0)
class MENew(torch.nn.Module):
def __init__(self, in_features, out_features, n_leaf, projection=
'linear', dropout=0.0):
super(MENew, self).__init__()
self.proj = projection
self.n_leaf = n_leaf
self.in_features = in_features
self.out_features = out_features
self.gw = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.
empty(in_features, n_leaf)))
self.gb = torch.nn.Parameter(torch.zeros(n_leaf))
if self.proj == 'linear':
self.pw = torch.nn.init.kaiming_normal_(torch.empty(
out_features * n_leaf, in_features), nonlinearity='linear')
self.pw = torch.nn.Parameter(self.pw.reshape(out_features,
n_leaf, in_features).permute(0, 2, 1))
self.pb = torch.nn.Parameter(torch.zeros(out_features, n_leaf))
elif self.proj == 'constant':
self.z = torch.nn.Parameter(torch.randn(out_features, n_leaf))
def extra_repr(self):
return 'in_features=%d, out_features=%d, n_leaf=%d, projection=%s' % (
self.in_features, self.out_features, self.n_leaf, self.proj)
def forward(self, input_0):
primals_1 = self.gw
primals_3 = self.gb
primals_4 = self.pw
primals_2 = self.pb
primals_5 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
alper111/hmog
|
ME
| false
| 6,183
|
[
"MIT"
] | 1
|
556da11600c97bcb075a0f19ffc284120d9789d2
|
https://github.com/alper111/hmog/tree/556da11600c97bcb075a0f19ffc284120d9789d2
|
CenterIntersection
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CenterIntersection(nn.Module):
def __init__(self, dim):
super(CenterIntersection, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(self.dim, self.dim)
self.layer2 = nn.Linear(self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, embeddings):
layer1_act = F.relu(self.layer1(embeddings))
attention = F.softmax(self.layer2(layer1_act), dim=0)
embedding = torch.sum(attention * embeddings, dim=0)
return embedding
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
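# Illustrative sketch (not part of the original record): the softmax over
# dim 0 pools the leading "conjunct" axis away, leaving one embedding per
# remaining position. Names are hypothetical.
model = CenterIntersection(dim=4)
embs = torch.rand(4, 4, 4, 4)  # (n_conjuncts, ..., dim)
out = model(embs)
assert out.shape == (4, 4, 4)  # attention-weighted sum over dim 0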
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp11 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp19 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp9 = tmp7 * tmp8
tmp10 = tmp1 / tmp6
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tmp3 / tmp6
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp5 / tmp6
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_2[grid(64)](buf3, primals_3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
return buf4, primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, primals_4, buf5
class CenterIntersectionNew(nn.Module):
def __init__(self, dim):
super(CenterIntersectionNew, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(self.dim, self.dim)
self.layer2 = nn.Linear(self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, input_0):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
amayuelas/NNKGReasoning
|
CenterIntersection
| false
| 6,184
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
LinearZeros
|
import torch
from torch import nn
class LinearZeros(nn.Linear):
def __init__(self, in_channels, out_channels, logscale_factor=3):
super().__init__(in_channels, out_channels)
self.logscale_factor = logscale_factor
self.register_parameter('logs', nn.Parameter(torch.zeros(out_channels))
)
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
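# Illustrative sketch (not part of the original record): with weight, bias
# and logs all zeroed, the layer starts as an exact zero map (exp(0) == 1),
# the usual Glow-style initialization. Names are hypothetical.
layer = LinearZeros(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)
assert torch.equal(layer(x), torch.zeros(4, 4, 4, 4))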
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = 3.0
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_0[grid(256)](buf0, primals_4, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class LinearZerosNew(nn.Linear):
def __init__(self, in_channels, out_channels, logscale_factor=3):
super().__init__(in_channels, out_channels)
self.logscale_factor = logscale_factor
self.register_parameter('logs', nn.Parameter(torch.zeros(out_channels))
)
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_4 = self.logs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
americast/glow-pytorch
|
LinearZeros
| false
| 6,185
|
[
"MIT"
] | 1
|
bbc576b96a5218417d25ae76b60f04ae24621de3
|
https://github.com/americast/glow-pytorch/tree/bbc576b96a5218417d25ae76b60f04ae24621de3
|
AndAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AndAttention(nn.Module):
def __init__(self, n_layers, entity_dim, temperature, attn_dropout=0.1):
super(AndAttention, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.n_layers = n_layers
for i in range(1, self.n_layers + 1):
setattr(self, 'and_layer_{}'.format(i), nn.Linear(2 *
entity_dim, 2 * entity_dim))
self.last_layer = nn.Linear(2 * entity_dim, entity_dim)
def forward(self, x1, x2):
x = torch.stack((x1, x2), dim=1)
attn = torch.matmul(x / self.temperature, x.transpose(1, 2))
attn = self.dropout(F.softmax(attn, dim=-1))
x = torch.matmul(attn, x)
x1 = x[:, 0, :]
x2 = x[:, 1, :]
x = torch.cat((x1, x2), dim=-1)
for i in range(1, self.n_layers + 1):
x = F.relu(getattr(self, 'and_layer_{}'.format(i))(x))
x = self.last_layer(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1, 'entity_dim': 4, 'temperature': 4}]
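# Illustrative sketch (not part of the original record): two entity
# embeddings are self-attended as a length-2 sequence, then fused by the
# MLP stack into a single embedding. Names are hypothetical.
model = AndAttention(n_layers=1, entity_dim=4, temperature=4)
model.eval()  # disable attention dropout for a deterministic pass
out = model(torch.rand(4, 4), torch.rand(4, 4))
assert out.shape == (4, 4)  # (batch, entity_dim)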
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 8
x0 = xindex % 4
x1 = xindex // 4 % 2
x2 = xindex // 8
x4 = xindex
tmp0 = x3
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + (x0 + 4 * x1)), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0 + 4 * x1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = 0.25
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (8 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 + 8 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_stack_1[grid(32)](primals_1, primals_2, buf1, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 2, 2), (4, 2, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (4, 4, 2), (8, 1,
4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 2, 2), (4, 2, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf4 = buf0
del buf0
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 2, 4), (8, 4,
1), 0), out=buf4)
buf5 = buf1
del buf1
triton_poi_fused_cat_3[grid(32)](buf4, buf5, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 8), (8, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_3, (8, 8), (1, 8
), 0), out=buf6)
del primals_3
buf7 = buf6
del buf6
triton_poi_fused_relu_4[grid(32)](buf7, primals_4, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_4
buf8 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
extern_kernels.addmm(primals_6, buf7, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf8)
del primals_6
return buf8, buf5, buf7, primals_5
class AndAttentionNew(nn.Module):
def __init__(self, n_layers, entity_dim, temperature, attn_dropout=0.1):
super(AndAttentionNew, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.n_layers = n_layers
for i in range(1, self.n_layers + 1):
setattr(self, 'and_layer_{}'.format(i), nn.Linear(2 *
entity_dim, 2 * entity_dim))
self.last_layer = nn.Linear(2 * entity_dim, entity_dim)
def forward(self, input_0, input_1):
primals_3 = self.and_layer_1.weight
primals_4 = self.and_layer_1.bias
primals_5 = self.last_layer.weight
primals_6 = self.last_layer.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
amayuelas/NNKGReasoning
|
AndAttention
| false
| 6,186
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
SpatialSEBlock
|
import torch
from torch import nn
class SpatialSEBlock(nn.Module):
def __init__(self, channel):
super(SpatialSEBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=channel, out_channels=1,
kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
y = self.sigmoid(self.conv(x))
return x * y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
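# Illustrative sketch (not part of the original record): the 1x1 conv
# produces one sigmoid gate per spatial location, shared across channels,
# so the output is an element-wise attenuated copy of the input.
block = SpatialSEBlock(channel=4)
x = torch.rand(4, 4, 4, 4)
y = block(x)
assert y.shape == x.shape
assert torch.all(y <= x)  # gates lie in (0, 1) and x is non-negative here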
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](primals_3, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
class SpatialSEBlockNew(nn.Module):
def __init__(self, channel):
super(SpatialSEBlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels=channel, out_channels=1,
kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
amitkumarj441/TGS_Kaggle
|
SpatialSEBlock
| false
| 6,187
|
[
"MIT"
] | 1
|
a4f613046cc36f3f6dbec28adb35f97a63c2a994
|
https://github.com/amitkumarj441/TGS_Kaggle/tree/a4f613046cc36f3f6dbec28adb35f97a63c2a994
|
BAP
|
import torch
import torch.nn as nn
class BAP(nn.Module):
def __init__(self, pool='GAP'):
super(BAP, self).__init__()
assert pool in ['GAP', 'GMP']
if pool == 'GAP':
self.pool = nn.AdaptiveAvgPool2d(1)
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, features, attentions):
B = features.size(0)
M = attentions.size(1)
for i in range(M):
AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B,
1, -1)
if i == 0:
feature_matrix = AiF
else:
feature_matrix = torch.cat([feature_matrix, AiF], dim=1)
return feature_matrix
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
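# Illustrative sketch (not part of the original record): with GAP, row i of
# the feature matrix is just the spatial mean of features * attention_i.
# Names are hypothetical.
bap = BAP(pool='GAP')
features = torch.rand(4, 4, 4, 4)    # (B, C, H, W)
attentions = torch.rand(4, 4, 4, 4)  # (B, M, H, W)
fm = bap(features, attentions)
assert fm.shape == (4, 4, 4)  # (B, M, C)
torch.testing.assert_close(fm[:, 0, :],
    (features * attentions[:, 0:1]).mean(dim=(2, 3)))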
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_cat_mean_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (16 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + (32 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.load(in_ptr1 + (48 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp8 = tmp0 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tmp0 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp0 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp25 = 16.0
tmp26 = tmp24 / tmp25
tl.store(out_ptr4 + (x0 + 16 * x1), tmp26, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
tl.store(out_ptr2 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 3
x0 = xindex % 4
x2 = xindex // 12
x3 = xindex % 12
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = 16.0
tmp10 = tmp8 / tmp9
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp7, tmp10, tmp11)
tmp13 = tmp0 >= tmp5
tmp14 = tmp13 & tmp4
tmp15 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tmp15 / tmp9
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp14, tmp16, tmp17)
tmp19 = tl.where(tmp6, tmp12, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tl.full([1], 3, tl.int64)
tmp25 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp22 & xmask, eviction_policy
='evict_last', other=0.0)
tmp26 = tmp25 / tmp9
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp22, tmp26, tmp27)
tmp29 = tl.where(tmp4, tmp21, tmp28)
tl.store(out_ptr0 + (x3 + 16 * x2), tmp29, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = reinterpret_tensor(buf6, (4, 1, 4), (16, 4, 1), 12)
get_raw_stream(0)
triton_per_fused_cat_mean_mul_0[grid(16)](arg0_1, arg1_1, buf0,
buf1, buf2, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf4 = reinterpret_tensor(buf6, (4, 3, 4), (16, 4, 1), 0)
triton_poi_fused_cat_1[grid(48)](buf0, buf1, buf2, buf4, 48, XBLOCK
=64, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
return buf6,
class BAPNew(nn.Module):
def __init__(self, pool='GAP'):
super(BAPNew, self).__init__()
assert pool in ['GAP', 'GMP']
if pool == 'GAP':
self.pool = nn.AdaptiveAvgPool2d(1)
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
amobiny/hide_and_seek
|
BAP
| false
| 6,188
|
[
"MIT"
] | 1
|
e298d9a352a6ee58e9beedf15ef3d700473b7f27
|
https://github.com/amobiny/hide_and_seek/tree/e298d9a352a6ee58e9beedf15ef3d700473b7f27
|
BetaIntersection
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BetaIntersection(nn.Module):
def __init__(self, dim):
super(BetaIntersection, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(2 * self.dim, 2 * self.dim)
self.layer2 = nn.Linear(2 * self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, alpha_embeddings, beta_embeddings):
all_embeddings = torch.cat([alpha_embeddings, beta_embeddings], dim=-1)
layer1_act = F.relu(self.layer1(all_embeddings))
attention = F.softmax(self.layer2(layer1_act), dim=0)
alpha_embedding = torch.sum(attention * alpha_embeddings, dim=0)
beta_embedding = torch.sum(attention * beta_embeddings, dim=0)
return alpha_embedding, beta_embedding
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
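# Illustrative sketch (not part of the original record): a single attention
# over dim 0 pools the alpha and beta parameter stacks with shared weights.
model = BetaIntersection(dim=4)
alpha, beta = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
a, b = model(alpha, beta)
assert a.shape == b.shape == (4, 4, 4)  # conjunct axis pooled away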
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp15 = tl.load(in_ptr2 + x0, xmask)
tmp17 = tl.load(in_ptr2 + (64 + x0), xmask)
tmp20 = tl.load(in_ptr2 + (128 + x0), xmask)
tmp23 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp0 * tmp15
tmp18 = tmp3 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp7 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp11 * tmp23
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp25, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 8), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf2,
primals_4, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_4[grid(64)](buf5, primals_1, primals_2,
buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
return buf6, buf7, primals_1, primals_2, reinterpret_tensor(buf0, (64,
8), (8, 1), 0), reinterpret_tensor(buf2, (64, 8), (8, 1), 0
), buf3, primals_5, buf8
class BetaIntersectionNew(nn.Module):
def __init__(self, dim):
super(BetaIntersectionNew, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(2 * self.dim, 2 * self.dim)
self.layer2 = nn.Linear(2 * self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, input_0, input_1):
primals_3 = self.layer1.weight
primals_4 = self.layer1.bias
primals_5 = self.layer2.weight
primals_6 = self.layer2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
amayuelas/NNKGReasoning
|
BetaIntersection
| false
| 6,189
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
TorchGloVeModel
|
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn.init import xavier_uniform_
class TorchGloVeModel(nn.Module):
def __init__(self, n_words, embed_dim):
super().__init__()
self.n_words = n_words
self.embed_dim = embed_dim
self.W = self._init_weights(self.n_words, self.embed_dim)
self.C = self._init_weights(self.n_words, self.embed_dim)
self.bw = self._init_weights(self.n_words, 1)
self.bc = self._init_weights(self.n_words, 1)
def _init_weights(self, m, n):
return nn.Parameter(xavier_uniform_(torch.empty(m, n)))
def forward(self, X_log, idx):
"""
Parameters
----------
X_log : torch.FloatTensor, shape `(batch_size, n_vocab)`.
idx : torch.LongTensor, shape `(batch_size, )`
Indices of the vocab items in the current batch.
Returns
-------
        torch.FloatTensor, shape `(batch_size, n_vocab)`.
"""
preds = self.W[idx].matmul(self.C.T) + self.bw[idx] + self.bc.T
diffs = preds - X_log
return diffs
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'n_words': 4, 'embed_dim': 4}]
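# Illustrative sketch (not part of the original record), using the 2-D
# shapes described in the docstring rather than the 4-D tensor above.
# Names are hypothetical.
model = TorchGloVeModel(n_words=4, embed_dim=4)
X_log = torch.rand(4, 4)   # log co-occurrence rows for the batch
idx = torch.arange(4)      # vocab indices of those rows
diffs = model(X_log, idx)
assert diffs.shape == (4, 4)  # (batch_size, n_vocab) residuals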
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
from torch.nn.init import xavier_uniform_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_index_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x4, xmask)
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr2 + tmp5, xmask, eviction_policy='evict_last')
tmp8 = tmp0 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 - tmp11
tl.store(out_ptr0 + x4, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(16)](primals_2, primals_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 4), (1, 4
), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_index_sub_1[grid(256)](buf1, primals_2,
primals_4, primals_5, primals_6, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
del primals_4
del primals_5
del primals_6
return buf2, primals_2, buf0, primals_3
class TorchGloVeModelNew(nn.Module):
def __init__(self, n_words, embed_dim):
super().__init__()
self.n_words = n_words
self.embed_dim = embed_dim
self.W = self._init_weights(self.n_words, self.embed_dim)
self.C = self._init_weights(self.n_words, self.embed_dim)
self.bw = self._init_weights(self.n_words, 1)
self.bc = self._init_weights(self.n_words, 1)
def _init_weights(self, m, n):
return nn.Parameter(xavier_uniform_(torch.empty(m, n)))
def forward(self, input_0, input_1):
primals_1 = self.W
primals_3 = self.C
primals_4 = self.bw
primals_5 = self.bc
primals_6 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
ammarhusain/cs224u
|
TorchGloVeModel
| false
| 6,190
|
[
"Apache-2.0"
] | 1
|
bbdb0aaa6b7437481e2e1fab8e12bbf1996eecd1
|
https://github.com/ammarhusain/cs224u/tree/bbdb0aaa6b7437481e2e1fab8e12bbf1996eecd1
|
MixerBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MlpBlock(nn.Module):
def __init__(self, features, hidden_dim):
super().__init__()
self.hidden_dim = hidden_dim
self.features = features
self.fc1 = nn.Linear(self.features, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, int(self.features))
def forward(self, x):
y = self.fc1(x)
y = F.gelu(y)
return self.fc2(y)
class MixerBlock(nn.Module):
"""Mixer block layer"""
def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim,
channels_mlp_dim):
super().__init__()
self.tokens_mlp_dim = tokens_mlp_dim
self.tokens_dim = tokens_dim
self.channels_mlp_dim = channels_mlp_dim
self.channels_dim = channels_dim
self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim)
self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim)
def forward(self, x):
y = F.layer_norm(x, x.shape[1:])
y = torch.transpose(y, 1, 2)
y = self.token_mixer(y)
y = torch.transpose(y, 1, 2)
x = x + y
y = F.layer_norm(x, x.shape[1:])
return x + self.channel_mixer(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'tokens_dim': 4, 'tokens_mlp_dim': 4, 'channels_dim': 4,
'channels_mlp_dim': 4}]
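# Illustrative sketch (not part of the original record): both mixing MLPs
# are residual, so the block preserves the input shape while mixing first
# along the token axis (after the transpose) and then the channel axis.
block = MixerBlock(tokens_dim=4, tokens_mlp_dim=4, channels_dim=4, channels_mlp_dim=4)
x = torch.rand(4, 4, 4, 4)
assert block(x).shape == x.shape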
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clone_native_layer_norm_0(in_ptr0, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r2 = rindex % 4
r3 = rindex // 4 % 4
r4 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp17 = tl.load(in_ptr0 + (r2 + 4 * r4 + 16 * r3 + 64 * x0), xmask,
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp18 = tmp17 - tmp10
tmp19 = 64.0
tmp20 = tmp16 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tl.store(out_ptr2 + (r1 + 64 * x0), tmp24, xmask)
@triton.jit
def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2(in_ptr0
, in_ptr1, in_ptr2, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r4 = rindex
x0 = xindex
r1 = rindex % 4
r2 = rindex // 4 % 4
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r4 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * r3 + 16 * r2 + 64 * x0), xmask,
other=0.0)
tmp2 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = tmp4 - tmp14
tmp22 = 64.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tmp28 = 0.015625
tmp29 = tmp26 * tmp28
tl.store(out_ptr2 + (r4 + 64 * x0), tmp27, xmask)
tl.store(out_ptr3 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x4, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x4, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_clone_native_layer_norm_0[grid(4)](primals_1, buf3,
4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf4)
del primals_2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_1[grid(256)](buf4, primals_3, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf6)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2[
grid(4)](primals_1, buf6, primals_5, buf10, buf15, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_gelu_3[grid(256)](buf11, buf12, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf13
triton_poi_fused_add_4[grid(256)](buf14, primals_1, buf6, primals_5,
primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_1
del primals_5
del primals_9
return buf14, primals_3, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), buf10, buf11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0
), primals_8, primals_6, buf15, primals_4
class MlpBlock(nn.Module):
def __init__(self, features, hidden_dim):
super().__init__()
self.hidden_dim = hidden_dim
self.features = features
self.fc1 = nn.Linear(self.features, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, int(self.features))
def forward(self, x):
y = self.fc1(x)
y = F.gelu(y)
return self.fc2(y)
class MixerBlockNew(nn.Module):
"""Mixer block layer"""
def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim,
channels_mlp_dim):
super().__init__()
self.tokens_mlp_dim = tokens_mlp_dim
self.tokens_dim = tokens_dim
self.channels_mlp_dim = channels_mlp_dim
self.channels_dim = channels_dim
self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim)
self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim)
def forward(self, input_0):
primals_2 = self.token_mixer.fc1.weight
primals_3 = self.token_mixer.fc1.bias
primals_4 = self.token_mixer.fc2.weight
primals_5 = self.token_mixer.fc2.bias
primals_6 = self.channel_mixer.fc1.weight
primals_7 = self.channel_mixer.fc1.bias
primals_8 = self.channel_mixer.fc2.weight
primals_9 = self.channel_mixer.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
amayuelas/NNKGReasoning
|
MixerBlock
| false
| 6,191
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
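A minimal smoke test for the MixerBlock record above, assuming a CUDA device and that MixerBlockNew (with the inductor runtime it imports) is in scope; note that call() is specialized to the exact (4, 4, 4, 4) shape enforced by assert_size_stride:

import torch

block = MixerBlockNew(tokens_dim=4, tokens_mlp_dim=4, channels_dim=4,
    channels_mlp_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = block(x)
assert y.shape == x.shape  # both mixing branches preserve the input shape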
SampaddingConv1D
|
import torch
import torch.nn as nn
class SampaddingConv1D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, use_bias=True):
super(SampaddingConv1D, self).__init__()
self.use_bias = use_bias
self.padding = nn.ConstantPad1d((int((kernel_size - 1) / 2), int(
kernel_size / 2)), 0)
self.conv1d = torch.nn.Conv1d(in_channels=in_channels, out_channels
=out_channels, kernel_size=kernel_size, bias=self.use_bias)
def forward(self, X):
X = self.padding(X)
X = self.conv1d(X)
return X
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = xindex // 7
x2 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(28)](primals_1, buf0, 28,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 7
), (0, 7, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4), (4, 1), 0
), primals_2, reinterpret_tensor(buf0, (1, 4, 7), (28, 7, 1), 0)
class SampaddingConv1DNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, use_bias=True):
super(SampaddingConv1DNew, self).__init__()
self.use_bias = use_bias
self.padding = nn.ConstantPad1d((int((kernel_size - 1) / 2), int(
kernel_size / 2)), 0)
self.conv1d = torch.nn.Conv1d(in_channels=in_channels, out_channels
=out_channels, kernel_size=kernel_size, bias=self.use_bias)
def forward(self, input_0):
primals_2 = self.conv1d.weight
primals_3 = self.conv1d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
amoonfana/Knowledge_Distillation
|
SampaddingConv1D
| false
| 6,192
|
[
"Apache-2.0"
] | 1
|
1ee814a8f70ae00d17e1e1ee778d5420d96c43c4
|
https://github.com/amoonfana/Knowledge_Distillation/tree/1ee814a8f70ae00d17e1e1ee778d5420d96c43c4
|
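The asymmetric ConstantPad1d above implements "same" padding for arbitrary kernel sizes: left = (k - 1) // 2 and right = k // 2 always sum to k - 1, which is exactly the length a stride-1 convolution removes. A plain-Python check of that identity:

for k in range(1, 9):
    left, right = (k - 1) // 2, k // 2
    assert left + right == k - 1  # so output length == input length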
ChannelPool
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ChannelPool(nn.MaxPool1d):
def forward(self, X):
X = X.permute(1, 2, 0)
pooled = F.max_pool1d(X, self.kernel_size)
pooled = pooled.permute(2, 0, 1).squeeze(0)
return pooled
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
class ChannelPoolNew(nn.MaxPool1d):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ananyaganesh/ftmp
|
ChannelPool
| false
| 6,193
|
[
"MIT"
] | 1
|
9ee23939f0c1da854846b8ce1a9abe4e9b377031
|
https://github.com/ananyaganesh/ftmp/tree/9ee23939f0c1da854846b8ce1a9abe4e9b377031
|
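Since forward() moves the pooled dimension to the end and the window spans it completely, ChannelPool with kernel_size equal to the first dimension reduces to a max over dim 0, which is what the fused kernel computes with its three maximum() calls. A CPU-side sketch, assuming ChannelPool from this record is in scope:

import torch

pool = ChannelPool(kernel_size=4)
x = torch.rand(4, 4, 4)
# pooling across the permuted last axis == max over the original dim 0
assert torch.allclose(pool(x), x.max(dim=0).values)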
FFNet
|
import torch
import torch.nn as nn
class MyRelu(nn.Module):
def __init__(self):
super().__init__()
self.myrelu1 = nn.ReLU()
def forward(self, x):
out1 = self.myrelu1(x)
return out1
class FFNet(nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super(FFNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.lh = nn.Linear(hidden_size, hidden_size)
self.l2 = nn.Linear(hidden_size, output_size)
self.lr = nn.Linear(output_size, hidden_size)
self.relu2 = MyRelu()
def forward(self, x):
out1 = self.l1(x)
out2 = self.relu2(out1)
out3 = self.lh(out2)
out4 = self.relu2(out3)
out5 = self.l2(out4)
outr = out1 - self.lr(out5)
out7 = self.relu2(outr)
out8 = self.lh(out7)
out9 = self.relu2(out8)
out10 = self.l2(out9)
return out10
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # <= 0 mask saved for the ReLU backward pass
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_sub_threshold_backward_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf0,
primals_2, buf1, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_5, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (4, 4), (1, 4
), 0), out=buf5)
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_sub_threshold_backward_2[grid(256)](buf6,
primals_2, buf5, primals_9, buf11, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_9
buf7 = buf5
del buf5
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf8,
primals_5, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf8, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_7
return reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf6, (64, 4),
(4, 1), 0), reinterpret_tensor(buf8, (64, 4), (4, 1), 0
), primals_6, buf10, primals_4, buf11, primals_8, buf12, buf13
class MyRelu(nn.Module):
def __init__(self):
super().__init__()
self.myrelu1 = nn.ReLU()
def forward(self, x):
out1 = self.myrelu1(x)
return out1
class FFNetNew(nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super(FFNetNew, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.lh = nn.Linear(hidden_size, hidden_size)
self.l2 = nn.Linear(hidden_size, output_size)
self.lr = nn.Linear(output_size, hidden_size)
self.relu2 = MyRelu()
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.lh.weight
primals_5 = self.lh.bias
primals_6 = self.l2.weight
primals_7 = self.l2.bias
primals_8 = self.lr.weight
primals_9 = self.lr.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
amilanpathirana/FeedForwardNet
|
FFNet
| false
| 6,194
|
[
"MIT"
] | 1
|
bdf0ebe3f80233fe970e4c60754d0ffe13cadbe1
|
https://github.com/amilanpathirana/FeedForwardNet/tree/bdf0ebe3f80233fe970e4c60754d0ffe13cadbe1
|
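Note that self.lh and self.l2 are each applied twice in forward(), which is why call() above feeds primals_4/primals_5 and primals_6/primals_7 into two separate matmuls; in eager mode the shared weights simply accumulate gradient from both passes. A small sketch, assuming FFNet is in scope:

import torch

net = FFNet(input_size=4, output_size=4, hidden_size=4)
net(torch.rand(2, 4)).sum().backward()
# lh.weight.grad holds contributions from both applications of self.lh
assert net.lh.weight.grad is not None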
SampaddingMaxPool1D
|
import torch
import torch.nn as nn
class SampaddingMaxPool1D(nn.Module):
def __init__(self, pooling_size, stride):
super(SampaddingMaxPool1D, self).__init__()
self.pooling_size = pooling_size
self.stride = stride
self.padding = nn.ConstantPad1d((int((pooling_size - 1) / 2), int(
pooling_size / 2)), 0)
self.maxpool1d = nn.MaxPool1d(self.pooling_size, stride=self.stride)
def forward(self, X):
X = self.padding(X)
X = self.maxpool1d(X)
return X
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'pooling_size': 4, 'stride': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x2), tmp5 & xmask, other=0.0)
tmp7 = x0
tmp8 = tmp7 >= tmp1
tmp9 = tmp7 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr0 + x2, tmp10 & xmask, other=0.0)
tmp12 = triton_helpers.maximum(tmp11, tmp6)
tmp13 = 1 + x0
tmp14 = tmp13 >= tmp1
tmp15 = tmp13 < tmp3
tmp16 = tmp14 & tmp15
tmp17 = tl.load(in_ptr0 + (1 + x2), tmp16 & xmask, other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp12)
tmp19 = 2 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tl.load(in_ptr0 + (2 + x2), tmp22 & xmask, other=0.0)
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tl.store(out_ptr0 + x2, tmp24, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
class SampaddingMaxPool1DNew(nn.Module):
def __init__(self, pooling_size, stride):
super(SampaddingMaxPool1DNew, self).__init__()
self.pooling_size = pooling_size
self.stride = stride
self.padding = nn.ConstantPad1d((int((pooling_size - 1) / 2), int(
pooling_size / 2)), 0)
self.maxpool1d = nn.MaxPool1d(self.pooling_size, stride=self.stride)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
amoonfana/Knowledge_Distillation
|
SampaddingMaxPool1D
| false
| 6,195
|
[
"Apache-2.0"
] | 1
|
1ee814a8f70ae00d17e1e1ee778d5420d96c43c4
|
https://github.com/amoonfana/Knowledge_Distillation/tree/1ee814a8f70ae00d17e1e1ee778d5420d96c43c4
|
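With stride=1 the padded pooling above is also length-preserving: the pad adds pooling_size - 1 elements and the window consumes pooling_size - 1, regardless of pooling_size. A CPU-side check, assuming SampaddingMaxPool1D is in scope:

import torch

pool = SampaddingMaxPool1D(pooling_size=4, stride=1)
x = torch.rand(2, 4, 16)  # (batch, channels, length)
assert pool(x).shape == x.shape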
CMVN
|
import torch
import torch.nn as nn
class CMVN(nn.Module):
__constants__ = ['mode', 'dim', 'eps']
def __init__(self, mode='global', dim=2, eps=1e-10):
super(CMVN, self).__init__()
if mode != 'global':
raise NotImplementedError(
'Only support global mean variance normalization.')
self.mode = mode
self.dim = dim
self.eps = eps
def forward(self, x):
if self.mode == 'global':
return (x - x.mean(self.dim, keepdim=True)) / (self.eps + x.std
(self.dim, keepdim=True))
def extra_repr(self):
return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
    tmp8 = 4.0  # N = 4: length of the normalized dim 2
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
    tmp22 = 3.0  # N - 1 = 3: Bessel correction matching torch.std
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-10
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tl.store(out_ptr0 + x3, tmp27, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CMVNNew(nn.Module):
__constants__ = ['mode', 'dim', 'eps']
def __init__(self, mode='global', dim=2, eps=1e-10):
super(CMVNNew, self).__init__()
if mode != 'global':
raise NotImplementedError(
'Only support global mean variance normalization.')
self.mode = mode
self.dim = dim
self.eps = eps
def extra_repr(self):
return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ana-kuznetsova/s3prl
|
CMVN
| false
| 6,196
|
[
"Apache-2.0"
] | 1
|
1fd3309f693f9cd765f56b12375ed0e7c41ef093
|
https://github.com/ana-kuznetsova/s3prl/tree/1fd3309f693f9cd765f56b12375ed0e7c41ef093
|
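The fused CMVN kernel spells out the normalization: a mean over the four dim-2 entries (tmp8 = 4.0), an unbiased variance dividing by N - 1 (tmp22 = 3.0, torch.std's default correction), then (x - mean) / (eps + std). The same formula in eager PyTorch, assuming CMVN from this record is in scope:

import torch

x = torch.rand(4, 4, 4, 4)
mu = x.mean(2, keepdim=True)
std = ((x - mu).pow(2).sum(2, keepdim=True) / (x.size(2) - 1)).sqrt()
assert torch.allclose(CMVN()(x), (x - mu) / (1e-10 + std))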
PositionalEncoding
|
import torch
from torch import nn
import torch.nn
import torch.optim
class PositionalEncoding(nn.Module):
"""
A special, non-learnable positional encoding for handling variable (possibly longer)
lengths of inputs. We simply add an ordinal number as an additional dimension for
the input embeddings, and then project them back to the original number of dimensions
"""
def __init__(self, dim_model):
super().__init__()
self.pos_embed = nn.Linear(dim_model + 1, dim_model)
self.activation = nn.ReLU()
def forward(self, x):
device = x.device
batch_size, seq_len, _ = x.shape
position_idx = torch.arange(0, seq_len, device=device).unsqueeze(0
).repeat(batch_size, 1).reshape(batch_size, seq_len, 1)
x_pos = torch.cat((x, position_idx), dim=2)
return self.activation(self.pos_embed(x_pos))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x3 = xindex // 5
x1 = xindex // 5 % 4
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = x1
tmp10 = tmp9.to(tl.float32)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp6, tmp10, tmp11)
tmp13 = tl.where(tmp4, tmp5, tmp12)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 5), (5, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 5), (5, 1), 0),
reinterpret_tensor(primals_2, (5, 4), (1, 5), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 5), (5, 1), 0), buf3
class PositionalEncodingNew(nn.Module):
"""
A special, non-learnable positional encoding for handling variable (possibly longer)
lengths of inputs. We simply add an ordinal number as an additional dimension for
the input embeddings, and then project them back to the original number of dimensions
"""
def __init__(self, dim_model):
super().__init__()
self.pos_embed = nn.Linear(dim_model + 1, dim_model)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_2 = self.pos_embed.weight
primals_3 = self.pos_embed.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ananthsub/ReAgent
|
PositionalEncoding
| false
| 6,197
|
[
"BSD-3-Clause"
] | 1
|
92f223a135b8fbc0942a217acb117ad0935897a3
|
https://github.com/ananthsub/ReAgent/tree/92f223a135b8fbc0942a217acb117ad0935897a3
|
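A short eager sketch of the record above, assuming PositionalEncoding is in scope. torch.cat type-promotes the int64 position column to float, which appears in triton_poi_fused_cat_0 as the .to(tl.float32) cast:

import torch

pe = PositionalEncoding(dim_model=4)
x = torch.rand(2, 3, 4)        # (batch, seq_len, dim_model)
out = pe(x)                    # a fifth column carries positions 0, 1, 2
assert out.shape == (2, 3, 4)  # projected back to dim_model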
ToTensor
|
from torch.nn import Module
import torch
class ToTensor(Module):
def __init__(self):
super(ToTensor, self).__init__()
def forward(self, x):
x = x / 255
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.00392156862745098  # 1 / 255: the division is lowered to a multiply
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ToTensorNew(Module):
def __init__(self):
super(ToTensorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
alinavalinav/finn
|
ToTensor
| false
| 6,198
|
[
"BSD-3-Clause"
] | 1
|
e443a5859066a410a63c08dcfec4a90527ca24be
|
https://github.com/alinavalinav/finn/tree/e443a5859066a410a63c08dcfec4a90527ca24be
|
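Inductor lowers the division by 255 to multiplication by its reciprocal (tmp1 = 0.00392156862745098 in triton_poi_fused_div_0); the two forms agree to float32 rounding:

import torch

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(x / 255, x * 0.00392156862745098)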
Discriminator2d
|
import torch
import torch.nn as nn
import torch.utils.data
import torch
class Discriminator2d(nn.Module):
def __init__(self, ngpu, wd, nc_d):
super(Discriminator2d, self).__init__()
self.ngpu = ngpu
self.conv0 = nn.Conv2d(nc_d, 2 ** (wd - 4), 4, 2, 1)
self.conv1 = nn.Conv2d(2 ** (wd - 4), 2 ** (wd - 3), 4, 2, 1)
self.conv2 = nn.Conv2d(2 ** (wd - 3), 2 ** (wd - 2), 4, 2, 1)
self.conv3 = nn.Conv2d(2 ** (wd - 2), 2 ** (wd - 1), 4, 2, 1)
self.conv4 = nn.Conv2d(2 ** (wd - 1), 2 ** wd, 4, 2, 1)
self.conv5 = nn.Conv2d(2 ** wd, 1, 4, 2, 0)
def forward(self, x):
x = nn.ReLU()(self.conv0(x))
x = nn.ReLU()(self.conv1(x))
x = nn.ReLU()(self.conv2(x))
x = nn.ReLU()(self.conv3(x))
x = nn.ReLU()(self.conv4(x))
return self.conv5(x)
def get_inputs():
return [torch.rand([4, 4, 128, 128])]
def get_init_inputs():
return [[], {'ngpu': False, 'wd': 4, 'nc_d': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 2
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 128, 128), (65536, 16384, 128, 1))
assert_size_stride(primals_4, (2, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (4, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (8, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (8,), (1,))
assert_size_stride(primals_10, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (1, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 32, 32), (2048, 1024, 32, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(8192)](buf3, primals_5,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(4096)](buf5, primals_7,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 8, 8, 8), (512, 64, 8, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(2048)](buf7, primals_9,
2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 4, 4), (256, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(1024)](buf9, primals_11,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 1, 1), (1, 1, 1, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_5[grid(4)](buf11, primals_13, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_13
return (buf11, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, buf1, buf3, buf5, buf7, buf9)
class Discriminator2dNew(nn.Module):
def __init__(self, ngpu, wd, nc_d):
super(Discriminator2dNew, self).__init__()
self.ngpu = ngpu
self.conv0 = nn.Conv2d(nc_d, 2 ** (wd - 4), 4, 2, 1)
self.conv1 = nn.Conv2d(2 ** (wd - 4), 2 ** (wd - 3), 4, 2, 1)
self.conv2 = nn.Conv2d(2 ** (wd - 3), 2 ** (wd - 2), 4, 2, 1)
self.conv3 = nn.Conv2d(2 ** (wd - 2), 2 ** (wd - 1), 4, 2, 1)
self.conv4 = nn.Conv2d(2 ** (wd - 1), 2 ** wd, 4, 2, 1)
self.conv5 = nn.Conv2d(2 ** wd, 1, 4, 2, 0)
def forward(self, input_0):
primals_1 = self.conv0.weight
primals_2 = self.conv0.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_8 = self.conv3.weight
primals_9 = self.conv3.bias
primals_10 = self.conv4.weight
primals_11 = self.conv4.bias
primals_12 = self.conv5.weight
primals_13 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
amirDahari1/super-res
|
Discriminator2d
| false
| 6,199
|
[
"MIT"
] | 1
|
2a93a20d65c570a5398caef65957fb612c3581c8
|
https://github.com/amirDahari1/super-res/tree/2a93a20d65c570a5398caef65957fb612c3581c8
|
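The buffer shapes asserted in call() follow standard convolution arithmetic: every 4x4, stride-2, pad-1 layer halves the spatial side (128 -> 64 -> 32 -> 16 -> 8 -> 4), and the final 4x4, stride-2, pad-0 layer collapses 4 -> 1. A plain-Python check:

def conv_out(n, k=4, s=2, p=1):
    return (n + 2 * p - k) // s + 1

n = 128
for _ in range(5):
    n = conv_out(n)  # 128 -> 64 -> 32 -> 16 -> 8 -> 4
assert n == 4 and conv_out(n, p=0) == 1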
KD
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class KD(nn.Module):
def __init__(self, alpha, T):
super(KD, self).__init__()
self.alpha = alpha
self.T = T
def forward(self, output_stu, output_tch, label):
loss_stu = F.cross_entropy(output_stu, label)
label_stu = F.log_softmax(output_stu / self.T, dim=1)
label_tch = F.softmax(output_tch / self.T, dim=1)
loss_tch = F.kl_div(label_stu, label_tch) * self.T * self.T
loss = loss_stu * (1 - self.alpha) + loss_tch * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alpha': 4, 'T': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 1.0
tmp10 = tmp0 * tmp9
tmp11 = tmp1 * tmp9
tmp12 = tmp2 * tmp9
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp4 * tmp9
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp6 * tmp9
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp10 - tmp17
tmp19 = 0.25
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mean_mul_neg_sub_sum_xlogy_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp19 = tl.load(in_ptr2 + r3, None)
tmp20 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr2 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr3 + r3, None)
tmp37 = tl.load(in_ptr3 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr3 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr3 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp45 = tl.load(in_ptr3 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tmp19 / tmp26
tmp28 = libdevice.isnan(tmp27).to(tl.int1)
tmp29 = 0.0
tmp30 = tmp27 == tmp29
tmp31 = tl_math.log(tmp27)
tmp32 = tmp27 * tmp31
tmp33 = tl.where(tmp30, tmp29, tmp32)
tmp34 = float('nan')
tmp35 = tl.where(tmp28, tmp34, tmp33)
tmp38 = tl_math.exp(tmp37)
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp44 + tmp46
tmp48 = tl_math.log(tmp47)
tmp49 = tmp36 - tmp48
tmp50 = tmp27 * tmp49
tmp51 = tmp35 - tmp50
tmp52 = tl.broadcast_to(tmp51, [RBLOCK])
tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0))
tmp55 = -tmp18
    tmp56 = 0.015625  # 1 / 64: cross-entropy mean over the 4*4*4 positions
    tmp57 = tmp55 * tmp56
    tmp58 = -3.0  # 1 - alpha with alpha = 4 from get_init_inputs
tmp59 = tmp57 * tmp58
tmp60 = 256.0
tmp61 = tmp54 / tmp60
tmp62 = 4.0
tmp63 = tmp61 * tmp62
tmp64 = tmp63 * tmp62
tmp65 = tmp64 * tmp62
tmp66 = tmp59 + tmp65
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp66, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](arg2_1, buf2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg2_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf6 = buf1
del buf1
triton_per_fused__log_softmax__softmax_add_div_mean_mul_neg_sub_sum_xlogy_2[
grid(1)](buf6, buf0, arg0_1, buf2, buf4, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del buf0
del buf2
del buf4
return buf6,
class KDNew(nn.Module):
def __init__(self, alpha, T):
super(KDNew, self).__init__()
self.alpha = alpha
self.T = T
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
amoonfana/Knowledge_Distillation
|
KD
| false
| 6,200
|
[
"Apache-2.0"
] | 1
|
1ee814a8f70ae00d17e1e1ee778d5420d96c43c4
|
https://github.com/amoonfana/Knowledge_Distillation/tree/1ee814a8f70ae00d17e1e1ee778d5420d96c43c4
|
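A CPU-side sketch of the eager KD loss above, using hypothetical but conventional hyperparameters and hard class labels (the record's own get_init_inputs passes alpha=4 and soft 4D labels, which the fused kernel folds into its -3.0 = 1 - alpha and 1/64 cross-entropy-mean constants):

import torch

kd = KD(alpha=0.5, T=4.0)            # hypothetical values for illustration
stu = torch.randn(8, 10)             # student logits
tch = torch.randn(8, 10)             # teacher logits
labels = torch.randint(0, 10, (8,))  # hard class labels
loss = kd(stu, tch, labels)
assert loss.dim() == 0               # scalar combined loss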
handpose_model
|
import torch
from collections import OrderedDict
import torch.nn as nn
def make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
class handpose_model(nn.Module):
def __init__(self):
super(handpose_model, self).__init__()
no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',
'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2',
[64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [
64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), (
'pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), (
'conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1
]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0
]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3,
1, 1]), ('conv4_3', [512, 512, 3, 1, 1]), ('conv4_4', [512, 512,
3, 1, 1]), ('conv5_1', [512, 512, 3, 1, 1]), ('conv5_2', [512,
512, 3, 1, 1]), ('conv5_3_CPM', [512, 128, 3, 1, 1])])
block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]), (
'conv6_2_CPM', [512, 22, 1, 1, 0])])
blocks = {}
blocks['block1_0'] = block1_0
blocks['block1_1'] = block1_1
for i in range(2, 7):
blocks['block%d' % i] = OrderedDict([('Mconv1_stage%d' % i, [
150, 128, 7, 1, 3]), ('Mconv2_stage%d' % i, [128, 128, 7, 1,
3]), ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), (
'Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d' % i, [128, 22, 1, 1, 0])])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_0 = blocks['block1_0']
self.model1_1 = blocks['block1_1']
self.model2 = blocks['block2']
self.model3 = blocks['block3']
self.model4 = blocks['block4']
self.model5 = blocks['block5']
self.model6 = blocks['block6']
def forward(self, x):
out1_0 = self.model1_0(x)
out1_1 = self.model1_1(out1_0)
concat_stage2 = torch.cat([out1_1, out1_0], 1)
out_stage2 = self.model2(concat_stage2)
concat_stage3 = torch.cat([out_stage2, out1_0], 1)
out_stage3 = self.model3(concat_stage3)
concat_stage4 = torch.cat([out_stage3, out1_0], 1)
out_stage4 = self.model4(concat_stage4)
concat_stage5 = torch.cat([out_stage4, out1_0], 1)
out_stage5 = self.model5(concat_stage5)
concat_stage6 = torch.cat([out_stage5, out1_0], 1)
out_stage6 = self.model6(concat_stage6)
return out_stage6
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from collections import OrderedDict
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
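    # Channel concat for torch.cat([stage_out, features], 1): channels
    # [0, 22) read the belief maps (in_ptr0) plus their conv bias (in_ptr1);
    # channels [22, 150) read the 128-channel CPM features (in_ptr2).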
xnumel = 38400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64 % 150
x0 = xindex % 64
x2 = xindex // 9600
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 22, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 1408 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 150, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-22 + x1) + 8192 * x2), tmp10 &
xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
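    # Plain bias add for the final 22-channel output; no fused ReLU because
    # Mconv7_stage6 is one of the no_relu_layers.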
xnumel = 5632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 22
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (512,), (1,))
assert_size_stride(primals_30, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512,), (1,))
assert_size_stride(primals_34, (22, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (22,), (1,))
assert_size_stride(primals_36, (128, 150, 7, 7), (7350, 49, 7, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_41, (128,), (1,))
assert_size_stride(primals_42, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_43, (128,), (1,))
assert_size_stride(primals_44, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_45, (128,), (1,))
assert_size_stride(primals_46, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_49, (22,), (1,))
assert_size_stride(primals_50, (128, 150, 7, 7), (7350, 49, 7, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128,), (1,))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128,), (1,))
assert_size_stride(primals_56, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_57, (128,), (1,))
assert_size_stride(primals_58, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_59, (128,), (1,))
assert_size_stride(primals_60, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_61, (128,), (1,))
assert_size_stride(primals_62, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_63, (22,), (1,))
assert_size_stride(primals_64, (128, 150, 7, 7), (7350, 49, 7, 1))
assert_size_stride(primals_65, (128,), (1,))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128,), (1,))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128,), (1,))
assert_size_stride(primals_70, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_71, (128,), (1,))
assert_size_stride(primals_72, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_73, (128,), (1,))
assert_size_stride(primals_74, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_75, (128,), (1,))
assert_size_stride(primals_76, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_77, (22,), (1,))
assert_size_stride(primals_78, (128, 150, 7, 7), (7350, 49, 7, 1))
assert_size_stride(primals_79, (128,), (1,))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128,), (1,))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128,), (1,))
assert_size_stride(primals_84, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_85, (128,), (1,))
assert_size_stride(primals_86, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_87, (128,), (1,))
assert_size_stride(primals_88, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_89, (128,), (1,))
assert_size_stride(primals_90, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_91, (22,), (1,))
assert_size_stride(primals_92, (128, 150, 7, 7), (7350, 49, 7, 1))
assert_size_stride(primals_93, (128,), (1,))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128,), (1,))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128,), (1,))
assert_size_stride(primals_98, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_99, (128,), (1,))
assert_size_stride(primals_100, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_101, (128,), (1,))
assert_size_stride(primals_102, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_103, (128,), (1,))
assert_size_stride(primals_104, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_105, (22,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4,
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9,
buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19,
buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 512, 8, 8), (32768, 64, 8, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_6[grid(131072)](buf27, primals_23,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_23
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 8, 8), (32768, 64, 8, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_6[grid(131072)](buf29, primals_25,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 512, 8, 8), (32768, 64, 8, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_6[grid(131072)](buf31, primals_27,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_27
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 512, 8, 8), (32768, 64, 8, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_6[grid(131072)](buf33, primals_29,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_29
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_7[grid(32768)](buf35, primals_31,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_6[grid(131072)](buf37, primals_33,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_33
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 22, 8, 8), (1408, 64, 8, 1))
buf39 = empty_strided_cuda((4, 150, 8, 8), (9600, 64, 8, 1), torch.
float32)
triton_poi_fused_cat_8[grid(38400)](buf38, primals_35, buf35, buf39,
38400, XBLOCK=512, num_warps=4, num_stages=1)
del buf38
del primals_35
buf40 = extern_kernels.convolution(buf39, primals_36, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_7[grid(32768)](buf41, primals_37,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_37
buf42 = extern_kernels.convolution(buf41, primals_38, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 128, 8, 8), (8192, 64, 8, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_7[grid(32768)](buf43, primals_39,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_39
buf44 = extern_kernels.convolution(buf43, primals_40, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 128, 8, 8), (8192, 64, 8, 1))
buf45 = buf44
del buf44
triton_poi_fused_convolution_relu_7[grid(32768)](buf45, primals_41,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf46 = extern_kernels.convolution(buf45, primals_42, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 128, 8, 8), (8192, 64, 8, 1))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_7[grid(32768)](buf47, primals_43,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_43
buf48 = extern_kernels.convolution(buf47, primals_44, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 128, 8, 8), (8192, 64, 8, 1))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_7[grid(32768)](buf49, primals_45,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_45
buf50 = extern_kernels.convolution(buf49, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 128, 8, 8), (8192, 64, 8, 1))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_7[grid(32768)](buf51, primals_47,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
buf52 = extern_kernels.convolution(buf51, primals_48, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 22, 8, 8), (1408, 64, 8, 1))
buf53 = empty_strided_cuda((4, 150, 8, 8), (9600, 64, 8, 1), torch.
float32)
triton_poi_fused_cat_8[grid(38400)](buf52, primals_49, buf35, buf53,
38400, XBLOCK=512, num_warps=4, num_stages=1)
del buf52
del primals_49
buf54 = extern_kernels.convolution(buf53, primals_50, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 128, 8, 8), (8192, 64, 8, 1))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_7[grid(32768)](buf55, primals_51,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_51
buf56 = extern_kernels.convolution(buf55, primals_52, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 8, 8), (8192, 64, 8, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_7[grid(32768)](buf57, primals_53,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_53
buf58 = extern_kernels.convolution(buf57, primals_54, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 128, 8, 8), (8192, 64, 8, 1))
buf59 = buf58
del buf58
triton_poi_fused_convolution_relu_7[grid(32768)](buf59, primals_55,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_55
buf60 = extern_kernels.convolution(buf59, primals_56, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 128, 8, 8), (8192, 64, 8, 1))
buf61 = buf60
del buf60
triton_poi_fused_convolution_relu_7[grid(32768)](buf61, primals_57,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_57
buf62 = extern_kernels.convolution(buf61, primals_58, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62
del buf62
triton_poi_fused_convolution_relu_7[grid(32768)](buf63, primals_59,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_59
buf64 = extern_kernels.convolution(buf63, primals_60, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64
del buf64
triton_poi_fused_convolution_relu_7[grid(32768)](buf65, primals_61,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_61
buf66 = extern_kernels.convolution(buf65, primals_62, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 22, 8, 8), (1408, 64, 8, 1))
buf67 = empty_strided_cuda((4, 150, 8, 8), (9600, 64, 8, 1), torch.
float32)
triton_poi_fused_cat_8[grid(38400)](buf66, primals_63, buf35, buf67,
38400, XBLOCK=512, num_warps=4, num_stages=1)
del buf66
del primals_63
buf68 = extern_kernels.convolution(buf67, primals_64, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68
del buf68
triton_poi_fused_convolution_relu_7[grid(32768)](buf69, primals_65,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_65
buf70 = extern_kernels.convolution(buf69, primals_66, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70
del buf70
triton_poi_fused_convolution_relu_7[grid(32768)](buf71, primals_67,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_67
buf72 = extern_kernels.convolution(buf71, primals_68, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72
del buf72
triton_poi_fused_convolution_relu_7[grid(32768)](buf73, primals_69,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_69
buf74 = extern_kernels.convolution(buf73, primals_70, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 128, 8, 8), (8192, 64, 8, 1))
buf75 = buf74
del buf74
triton_poi_fused_convolution_relu_7[grid(32768)](buf75, primals_71,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_71
buf76 = extern_kernels.convolution(buf75, primals_72, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76
del buf76
triton_poi_fused_convolution_relu_7[grid(32768)](buf77, primals_73,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_73
buf78 = extern_kernels.convolution(buf77, primals_74, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78
del buf78
triton_poi_fused_convolution_relu_7[grid(32768)](buf79, primals_75,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_75
buf80 = extern_kernels.convolution(buf79, primals_76, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 22, 8, 8), (1408, 64, 8, 1))
buf81 = empty_strided_cuda((4, 150, 8, 8), (9600, 64, 8, 1), torch.
float32)
triton_poi_fused_cat_8[grid(38400)](buf80, primals_77, buf35, buf81,
38400, XBLOCK=512, num_warps=4, num_stages=1)
del buf80
del primals_77
buf82 = extern_kernels.convolution(buf81, primals_78, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82
del buf82
triton_poi_fused_convolution_relu_7[grid(32768)](buf83, primals_79,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_79
buf84 = extern_kernels.convolution(buf83, primals_80, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84
del buf84
triton_poi_fused_convolution_relu_7[grid(32768)](buf85, primals_81,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_81
buf86 = extern_kernels.convolution(buf85, primals_82, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86
del buf86
triton_poi_fused_convolution_relu_7[grid(32768)](buf87, primals_83,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_83
buf88 = extern_kernels.convolution(buf87, primals_84, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 128, 8, 8), (8192, 64, 8, 1))
buf89 = buf88
del buf88
triton_poi_fused_convolution_relu_7[grid(32768)](buf89, primals_85,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_85
buf90 = extern_kernels.convolution(buf89, primals_86, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf90, (4, 128, 8, 8), (8192, 64, 8, 1))
buf91 = buf90
del buf90
triton_poi_fused_convolution_relu_7[grid(32768)](buf91, primals_87,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_87
buf92 = extern_kernels.convolution(buf91, primals_88, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 128, 8, 8), (8192, 64, 8, 1))
buf93 = buf92
del buf92
triton_poi_fused_convolution_relu_7[grid(32768)](buf93, primals_89,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_89
buf94 = extern_kernels.convolution(buf93, primals_90, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf94, (4, 22, 8, 8), (1408, 64, 8, 1))
buf95 = empty_strided_cuda((4, 150, 8, 8), (9600, 64, 8, 1), torch.
float32)
triton_poi_fused_cat_8[grid(38400)](buf94, primals_91, buf35, buf95,
38400, XBLOCK=512, num_warps=4, num_stages=1)
del buf94
del primals_91
buf96 = extern_kernels.convolution(buf95, primals_92, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf96, (4, 128, 8, 8), (8192, 64, 8, 1))
buf97 = buf96
del buf96
triton_poi_fused_convolution_relu_7[grid(32768)](buf97, primals_93,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_93
buf98 = extern_kernels.convolution(buf97, primals_94, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 128, 8, 8), (8192, 64, 8, 1))
buf99 = buf98
del buf98
triton_poi_fused_convolution_relu_7[grid(32768)](buf99, primals_95,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_95
buf100 = extern_kernels.convolution(buf99, primals_96, stride=(1, 1
), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf100, (4, 128, 8, 8), (8192, 64, 8, 1))
buf101 = buf100
del buf100
triton_poi_fused_convolution_relu_7[grid(32768)](buf101, primals_97,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_97
buf102 = extern_kernels.convolution(buf101, primals_98, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 128, 8, 8), (8192, 64, 8, 1))
buf103 = buf102
del buf102
triton_poi_fused_convolution_relu_7[grid(32768)](buf103, primals_99,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_99
buf104 = extern_kernels.convolution(buf103, primals_100, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf104, (4, 128, 8, 8), (8192, 64, 8, 1))
buf105 = buf104
del buf104
triton_poi_fused_convolution_relu_7[grid(32768)](buf105,
primals_101, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_101
buf106 = extern_kernels.convolution(buf105, primals_102, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf106, (4, 128, 8, 8), (8192, 64, 8, 1))
buf107 = buf106
del buf106
triton_poi_fused_convolution_relu_7[grid(32768)](buf107,
primals_103, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_103
buf108 = extern_kernels.convolution(buf107, primals_104, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf108, (4, 22, 8, 8), (1408, 64, 8, 1))
buf109 = buf108
del buf108
triton_poi_fused_convolution_9[grid(5632)](buf109, primals_105,
5632, XBLOCK=256, num_warps=4, num_stages=1)
del primals_105
return (buf109, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, primals_48,
primals_50, primals_52, primals_54, primals_56, primals_58,
primals_60, primals_62, primals_64, primals_66, primals_68,
primals_70, primals_72, primals_74, primals_76, primals_78,
primals_80, primals_82, primals_84, primals_86, primals_88,
primals_90, primals_92, primals_94, primals_96, primals_98,
primals_100, primals_102, primals_104, buf1, buf3, buf4, buf5, buf7,
buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23,
buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41,
buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59,
buf61, buf63, buf65, buf67, buf69, buf71, buf73, buf75, buf77,
buf79, buf81, buf83, buf85, buf87, buf89, buf91, buf93, buf95,
buf97, buf99, buf101, buf103, buf105, buf107)
def make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
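# Hedged usage sketch (not from the source repo; the helper name below is
# hypothetical): how a block spec maps to layers. Conv entries are
# [in_ch, out_ch, kernel, stride, pad]; names containing 'pool' are
# [kernel, stride, pad]; names listed in no_relu_layers skip the ReLU.
def _example_make_layers():
    block = OrderedDict([
        ('conv_a', [3, 16, 3, 1, 1]),   # Conv2d(3, 16, 3, 1, 1) + ReLU
        ('pool_a', [2, 2, 0]),          # MaxPool2d(2, stride=2)
        ('conv_b', [16, 8, 1, 1, 0]),   # Conv2d(16, 8, 1), no ReLU
    ])
    net = make_layers(block, no_relu_layers=['conv_b'])
    return net(torch.randn(1, 3, 32, 32))  # -> shape (1, 8, 16, 16)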
class handpose_modelNew(nn.Module):
def __init__(self):
super(handpose_modelNew, self).__init__()
no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',
'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2',
[64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [
64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), (
'pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), (
'conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1
]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0
]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3,
1, 1]), ('conv4_3', [512, 512, 3, 1, 1]), ('conv4_4', [512, 512,
3, 1, 1]), ('conv5_1', [512, 512, 3, 1, 1]), ('conv5_2', [512,
512, 3, 1, 1]), ('conv5_3_CPM', [512, 128, 3, 1, 1])])
block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]), (
'conv6_2_CPM', [512, 22, 1, 1, 0])])
blocks = {}
blocks['block1_0'] = block1_0
blocks['block1_1'] = block1_1
for i in range(2, 7):
blocks['block%d' % i] = OrderedDict([('Mconv1_stage%d' % i, [
150, 128, 7, 1, 3]), ('Mconv2_stage%d' % i, [128, 128, 7, 1,
3]), ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), (
'Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d' % i, [128, 22, 1, 1, 0])])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_0 = blocks['block1_0']
self.model1_1 = blocks['block1_1']
self.model2 = blocks['block2']
self.model3 = blocks['block3']
self.model4 = blocks['block4']
self.model5 = blocks['block5']
self.model6 = blocks['block6']
def forward(self, input_0):
primals_1 = self.model1_0.conv1_1.weight
primals_2 = self.model1_0.conv1_1.bias
primals_4 = self.model1_0.conv1_2.weight
primals_5 = self.model1_0.conv1_2.bias
primals_6 = self.model1_0.conv2_1.weight
primals_7 = self.model1_0.conv2_1.bias
primals_8 = self.model1_0.conv2_2.weight
primals_9 = self.model1_0.conv2_2.bias
primals_10 = self.model1_0.conv3_1.weight
primals_11 = self.model1_0.conv3_1.bias
primals_12 = self.model1_0.conv3_2.weight
primals_13 = self.model1_0.conv3_2.bias
primals_14 = self.model1_0.conv3_3.weight
primals_15 = self.model1_0.conv3_3.bias
primals_16 = self.model1_0.conv3_4.weight
primals_17 = self.model1_0.conv3_4.bias
primals_18 = self.model1_0.conv4_1.weight
primals_19 = self.model1_0.conv4_1.bias
primals_20 = self.model1_0.conv4_2.weight
primals_21 = self.model1_0.conv4_2.bias
primals_22 = self.model1_0.conv4_3.weight
primals_23 = self.model1_0.conv4_3.bias
primals_24 = self.model1_0.conv4_4.weight
primals_25 = self.model1_0.conv4_4.bias
primals_26 = self.model1_0.conv5_1.weight
primals_27 = self.model1_0.conv5_1.bias
primals_28 = self.model1_0.conv5_2.weight
primals_29 = self.model1_0.conv5_2.bias
primals_30 = self.model1_0.conv5_3_CPM.weight
primals_31 = self.model1_0.conv5_3_CPM.bias
primals_32 = self.model1_1.conv6_1_CPM.weight
primals_33 = self.model1_1.conv6_1_CPM.bias
primals_34 = self.model1_1.conv6_2_CPM.weight
primals_35 = self.model1_1.conv6_2_CPM.bias
primals_36 = self.model2.Mconv1_stage2.weight
primals_37 = self.model2.Mconv1_stage2.bias
primals_38 = self.model2.Mconv2_stage2.weight
primals_39 = self.model2.Mconv2_stage2.bias
primals_40 = self.model2.Mconv3_stage2.weight
primals_41 = self.model2.Mconv3_stage2.bias
primals_42 = self.model2.Mconv4_stage2.weight
primals_43 = self.model2.Mconv4_stage2.bias
primals_44 = self.model2.Mconv5_stage2.weight
primals_45 = self.model2.Mconv5_stage2.bias
primals_46 = self.model2.Mconv6_stage2.weight
primals_47 = self.model2.Mconv6_stage2.bias
primals_48 = self.model2.Mconv7_stage2.weight
primals_49 = self.model2.Mconv7_stage2.bias
primals_50 = self.model3.Mconv1_stage3.weight
primals_51 = self.model3.Mconv1_stage3.bias
primals_52 = self.model3.Mconv2_stage3.weight
primals_53 = self.model3.Mconv2_stage3.bias
primals_54 = self.model3.Mconv3_stage3.weight
primals_55 = self.model3.Mconv3_stage3.bias
primals_56 = self.model3.Mconv4_stage3.weight
primals_57 = self.model3.Mconv4_stage3.bias
primals_58 = self.model3.Mconv5_stage3.weight
primals_59 = self.model3.Mconv5_stage3.bias
primals_60 = self.model3.Mconv6_stage3.weight
primals_61 = self.model3.Mconv6_stage3.bias
primals_62 = self.model3.Mconv7_stage3.weight
primals_63 = self.model3.Mconv7_stage3.bias
primals_64 = self.model4.Mconv1_stage4.weight
primals_65 = self.model4.Mconv1_stage4.bias
primals_66 = self.model4.Mconv2_stage4.weight
primals_67 = self.model4.Mconv2_stage4.bias
primals_68 = self.model4.Mconv3_stage4.weight
primals_69 = self.model4.Mconv3_stage4.bias
primals_70 = self.model4.Mconv4_stage4.weight
primals_71 = self.model4.Mconv4_stage4.bias
primals_72 = self.model4.Mconv5_stage4.weight
primals_73 = self.model4.Mconv5_stage4.bias
primals_74 = self.model4.Mconv6_stage4.weight
primals_75 = self.model4.Mconv6_stage4.bias
primals_76 = self.model4.Mconv7_stage4.weight
primals_77 = self.model4.Mconv7_stage4.bias
primals_78 = self.model5.Mconv1_stage5.weight
primals_79 = self.model5.Mconv1_stage5.bias
primals_80 = self.model5.Mconv2_stage5.weight
primals_81 = self.model5.Mconv2_stage5.bias
primals_82 = self.model5.Mconv3_stage5.weight
primals_83 = self.model5.Mconv3_stage5.bias
primals_84 = self.model5.Mconv4_stage5.weight
primals_85 = self.model5.Mconv4_stage5.bias
primals_86 = self.model5.Mconv5_stage5.weight
primals_87 = self.model5.Mconv5_stage5.bias
primals_88 = self.model5.Mconv6_stage5.weight
primals_89 = self.model5.Mconv6_stage5.bias
primals_90 = self.model5.Mconv7_stage5.weight
primals_91 = self.model5.Mconv7_stage5.bias
primals_92 = self.model6.Mconv1_stage6.weight
primals_93 = self.model6.Mconv1_stage6.bias
primals_94 = self.model6.Mconv2_stage6.weight
primals_95 = self.model6.Mconv2_stage6.bias
primals_96 = self.model6.Mconv3_stage6.weight
primals_97 = self.model6.Mconv3_stage6.bias
primals_98 = self.model6.Mconv4_stage6.weight
primals_99 = self.model6.Mconv4_stage6.bias
primals_100 = self.model6.Mconv5_stage6.weight
primals_101 = self.model6.Mconv5_stage6.bias
primals_102 = self.model6.Mconv6_stage6.weight
primals_103 = self.model6.Mconv6_stage6.bias
primals_104 = self.model6.Mconv7_stage6.weight
primals_105 = self.model6.Mconv7_stage6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105])
return output[0]
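# Smoke-test sketch (an assumption, not part of the source repo): call()
# pins CUDA device 0, so this needs a GPU. Per the assert_size_stride on
# primals_3, the fused graph expects input (4, 3, 64, 64) and returns the
# stage-6 belief maps of shape (4, 22, 8, 8).
def _example_handpose_forward():
    model = handpose_modelNew().cuda()
    return model(torch.randn(4, 3, 64, 64, device='cuda'))  # -> (4, 22, 8, 8)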
|
alanlee-chn/handpose-est
|
handpose_model
| false
| 6,201
|
[
"MIT"
] | 1
|
241a6beb45e045e65a328aade22ce536f4dcd893
|
https://github.com/alanlee-chn/handpose-est/tree/241a6beb45e045e65a328aade22ce536f4dcd893
|
FeedForward
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
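# Usage sketch (illustrative, not from the source repo): the block is
# shape-preserving in the last dimension, mapping d_model -> d_ff -> d_model
# with ReLU and dropout in between.
def _example_feedforward():
    ff = FeedForward(d_model=4)
    return ff(torch.rand(4, 4, 4, 4))  # -> (4, 4, 4, 4)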
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
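    # Fused bias-add + ReLU on the d_ff activations; also stores the
    # (activation <= 0) mask that the ReLU backward ("threshold_backward")
    # reuses, which is why it appears in the kernel name.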
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2048, 4), (4, 1))
assert_size_stride(primals_2, (2048,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2048), (2048, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1,
primals_2, buf3, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1,
2048), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), primals_4, buf3
class FeedForwardNew(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, input_0):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
and-smith/Vac-Scholar-Curb-GAN
|
FeedForward
| false
| 6,202
|
[
"MIT"
] | 1
|
142bd70fdf0f1cbc4a1c20c5e58fa5b6a9dbe742
|
https://github.com/and-smith/Vac-Scholar-Curb-GAN/tree/142bd70fdf0f1cbc4a1c20c5e58fa5b6a9dbe742
|
VAE
|
import torch
from torch.nn import functional as F
import torch.nn as nn
import torch.utils.data
class VAE(nn.Module):
def __init__(self, dim, middle=400, bottleneck=100):
super(VAE, self).__init__()
self.dim = dim
self.fc1 = nn.Linear(dim, middle)
self.fc21 = nn.Linear(middle, bottleneck)
self.fc22 = nn.Linear(middle, bottleneck)
self.fc3 = nn.Linear(bottleneck, middle)
self.fc4 = nn.Linear(middle, dim)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
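# Usage sketch (illustrative, not from the source repo): each trailing
# feature vector of size dim is encoded to a 100-d latent via
# mu + eps * exp(0.5 * logvar), then decoded through a sigmoid, so the
# reconstruction lies in (0, 1).
def _example_vae():
    vae = VAE(dim=4)
    recon, mu, logvar = vae(torch.rand(4, 4, 4, 4))
    return recon.shape, mu.shape  # (4, 4, 4, 4), (4, 4, 4, 100)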
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import functional as F
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
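    # Fused reparameterization trick: out = mu + eps * exp(0.5 * logvar),
    # with in_ptr0 = mu, in_ptr1 = eps ~ N(0, 1), in_ptr2 = logvar.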
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 400), (400, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (100, 400), (400, 1))
assert_size_stride(primals_7, (100,), (1,))
assert_size_stride(primals_8, (400, 100), (100, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (4, 400), (400, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf12, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 400),
(400, 1), 0), reinterpret_tensor(primals_4, (400, 100), (1, 400
), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 400),
(400, 1), 0), reinterpret_tensor(primals_6, (400, 100), (1, 400
), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = torch.ops.aten.randn.default([4, 4, 4, 100], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1),
torch.float32)
triton_poi_fused_add_exp_mul_1[grid(6400)](buf2, buf5, buf3, buf6,
6400, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_8, (100, 400), (1, 100), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf7
buf11 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf8,
primals_9, buf11, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_10, (400, 4), (1, 400), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf9
triton_poi_fused_sigmoid_2[grid(256)](buf10, primals_11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return buf10, reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100,
1), 0), reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0
), buf5, reinterpret_tensor(buf6, (64, 100), (100, 1), 0
), reinterpret_tensor(buf8, (64, 400), (400, 1), 0
), buf10, primals_10, buf11, primals_8, primals_6, primals_4, buf12
class VAENew(nn.Module):
def __init__(self, dim, middle=400, bottleneck=100):
super(VAENew, self).__init__()
self.dim = dim
self.fc1 = nn.Linear(dim, middle)
self.fc21 = nn.Linear(middle, bottleneck)
self.fc22 = nn.Linear(middle, bottleneck)
self.fc3 = nn.Linear(bottleneck, middle)
self.fc4 = nn.Linear(middle, dim)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_6 = self.fc22.weight
primals_7 = self.fc22.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_10 = self.fc4.weight
primals_11 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
|
anandijain/audio
|
VAE
| false
| 6,203
|
[
"MIT"
] | 1
|
1990de57ebc760cf6c5cc7132119b389cfd2dbfb
|
https://github.com/anandijain/audio/tree/1990de57ebc760cf6c5cc7132119b389cfd2dbfb
|
SelfAttentionPooling
|
import torch
import torch.nn as nn
class SelfAttentionPooling(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPooling, self).__init__()
self.W = nn.Linear(input_dim, 1)
def forward(self, batch_rep):
"""
input:
        batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
attention_weight:
att_w : size (N, T, 1)
return:
utter_rep: size (N, H)
"""
softmax = nn.functional.softmax
att_w = softmax(self.W(batch_rep).squeeze(-1)).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
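# Usage sketch matching the docstring's 3-D shapes (illustrative, not from
# the source repo): the attention weights over T sum to 1 per utterance, so
# the output is a weighted mean of the frame representations.
def _example_self_attention_pooling():
    pool = SelfAttentionPooling(input_dim=256)
    frames = torch.rand(8, 100, 256)  # (N, T, H)
    return pool(frames)               # -> (8, 256)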
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
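    # Softmax pass 1 over the leading dim of the (4, 4, 4) score tensor:
    # subtract the running max for numerical stability, then exponentiate;
    # pass 2 below normalizes by the sum.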
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
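    # Fused (batch_rep * att_w).sum(dim=1): four multiply-accumulate steps,
    # one per position along dim 1, with each attention weight broadcast
    # across the hidden dim.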
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_mul_sum_2[grid(64)](primals_3, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf3
return buf4, primals_3, buf1
class SelfAttentionPoolingNew(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPoolingNew, self).__init__()
self.W = nn.Linear(input_dim, 1)
def forward(self, input_0):
primals_1 = self.W.weight
primals_2 = self.W.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ana-kuznetsova/s3prl
|
SelfAttentionPooling
| false
| 6,204
|
[
"Apache-2.0"
] | 1
|
1fd3309f693f9cd765f56b12375ed0e7c41ef093
|
https://github.com/ana-kuznetsova/s3prl/tree/1fd3309f693f9cd765f56b12375ed0e7c41ef093
|
down_right_shifted_conv2d
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
def right_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :, :xs[3] - 1]
pad = nn.ZeroPad2d((1, 0, 0, 0)) if pad is None else pad
return pad(x)
class down_right_shifted_conv2d(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 2),
stride=(1, 1), shift_output_right=False, norm='weight_norm'):
super(down_right_shifted_conv2d, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.pad = nn.ZeroPad2d((filter_size[1] - 1, 0, filter_size[0] - 1, 0))
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride=stride)
self.shift_output_right = shift_output_right
self.norm = norm
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_right:
self.right_shift = lambda x: right_shift(x, pad=nn.ZeroPad2d((1,
0, 0, 0)))
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
x = self.bn(x) if self.norm == 'batch_norm' else x
return self.right_shift(x) if self.shift_output_right else x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_filters_in': 4, 'num_filters_out': 4}]
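# Hedged sketch (not part of the original dataset entry): with the default
# (2, 2) filter, the layer zero-pads one row on top and one column on the left
# before the convolution, so each output position only sees inputs above and to
# the left (the "down-right" causal receptive field used in PixelCNN++), and
# the spatial size is preserved.
if __name__ == '__main__':
    layer = down_right_shifted_conv2d(4, 4)
    y = layer(torch.rand(4, 4, 4, 4))
    print(y.shape)  # expected: torch.Size([4, 4, 4, 4])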
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -1 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp5 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3,
primals_2, buf3, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3
def right_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :, :xs[3] - 1]
pad = nn.ZeroPad2d((1, 0, 0, 0)) if pad is None else pad
return pad(x)
class down_right_shifted_conv2dNew(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 2),
stride=(1, 1), shift_output_right=False, norm='weight_norm'):
super(down_right_shifted_conv2dNew, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.pad = nn.ZeroPad2d((filter_size[1] - 1, 0, filter_size[0] - 1, 0))
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride=stride)
self.shift_output_right = shift_output_right
self.norm = norm
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_right:
self.right_shift = lambda x: right_shift(x, pad=nn.ZeroPad2d((1,
0, 0, 0)))
def forward(self, input_0):
primals_4 = self.conv.bias
primals_2 = self.conv.weight_g
primals_3 = self.conv.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
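# Hedged sanity-check sketch (not part of the original dataset entry; assumes a
# CUDA device and that the eager down_right_shifted_conv2d above is in scope).
# The compiled wrapper should reproduce the eager module once both share the
# same weight-normalized parameters.
if __name__ == '__main__' and torch.cuda.is_available():
    eager = down_right_shifted_conv2d(4, 4).cuda()
    fused = down_right_shifted_conv2dNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(eager(x), fused(x), atol=1e-5))  # expected: True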
|
andiac/pixel-cnn-pp
|
down_right_shifted_conv2d
| false
| 6,205
|
[
"MIT"
] | 1
|
3ba856320e40208cbb6e9cac3e66a739f148903e
|
https://github.com/andiac/pixel-cnn-pp/tree/3ba856320e40208cbb6e9cac3e66a739f148903e
|
SuperPointNet
|
import torch
class SuperPointNet(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNet, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
padding=0)
self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
padding=0)
def forward(self, x):
""" Forward pass that jointly computes unprocessed point and descriptor
tensors.
Args:
        - x: Image PyTorch tensor shaped N x 1 x H x W.
        Returns:
        - semi: Output point PyTorch tensor shaped N x 65 x H/8 x W/8.
        - desc: Output descriptor PyTorch tensor shaped N x 256 x H/8 x W/8.

"""
x = self.relu(self.conv1a(x))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
cPa = self.relu(self.convPa(x))
semi = self.convPb(cPa)
cDa = self.relu(self.convDa(x))
desc = self.convDb(cDa)
dn = torch.norm(desc, p=2, dim=1)
desc = desc.div(torch.unsqueeze(dn, 1))
return semi, desc
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
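# Hedged usage sketch (not part of the original dataset entry): for a 64x64
# input the three 2x2 max-pools reduce the spatial size by a factor of 8, so
# semi is N x 65 x 8 x 8 and the L2-normalized descriptor map desc is
# N x 256 x 8 x 8.
if __name__ == '__main__':
    net = SuperPointNet()
    semi, desc = net(torch.rand(4, 1, 64, 64))
    print(semi.shape, desc.shape)  # torch.Size([4, 65, 8, 8]) torch.Size([4, 256, 8, 8])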
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 8
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_13(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 260
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 65
y1 = yindex // 65
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 65 * x2 + 4160 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused_convolution_linalg_vector_norm_14(in_out_ptr0,
in_out_ptr1, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_div_15(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 16384 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 64 * y1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (65, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_21, (65,), (1,))
assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_25, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 9)](primals_4, buf0, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_8, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(8192, 9)](primals_10, buf3, 8192, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf7 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(32768, 9)](primals_18, buf7, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf8 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(32768, 9)](primals_22, buf8, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf9 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_convolution_relu_4[grid(256, 4096)](buf9,
primals_2, buf10, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf9
del primals_2
buf11 = extern_kernels.convolution(buf10, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_5[grid(1048576)](buf12, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf12,
buf13, buf14, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf15 = extern_kernels.convolution(buf13, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_7[grid(262144)](buf16, primals_7,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_7[grid(262144)](buf18, primals_9,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
buf20 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(65536)](buf18,
buf19, buf20, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_9[grid(131072)](buf22, primals_11,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_9[grid(131072)](buf24, primals_13,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(32768)](buf24,
buf25, buf26, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf28 = buf27
del buf27
triton_poi_fused_convolution_relu_11[grid(32768)](buf28, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf30 = buf29
del buf29
triton_poi_fused_convolution_relu_11[grid(32768)](buf30, primals_17,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf32 = buf31
del buf31
triton_poi_fused_convolution_relu_12[grid(65536)](buf32, primals_19,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf33 = extern_kernels.convolution(buf32, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 65, 8, 8), (4160, 1, 520, 65))
buf34 = empty_strided_cuda((4, 65, 8, 8), (4160, 64, 8, 1), torch.
float32)
triton_poi_fused_convolution_13[grid(260, 64)](buf33, primals_21,
buf34, 260, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf33
del primals_21
buf35 = extern_kernels.convolution(buf30, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf36 = buf35
del buf35
triton_poi_fused_convolution_relu_12[grid(65536)](buf36, primals_23,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_23
buf37 = extern_kernels.convolution(buf36, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf38 = buf37
del buf37
buf39 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
buf40 = buf39
del buf39
triton_per_fused_convolution_linalg_vector_norm_14[grid(256)](buf38,
buf40, primals_25, 256, 256, num_warps=2, num_stages=1)
del primals_25
buf41 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_div_15[grid(1024, 64)](buf38, buf40, buf41, 1024,
64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
return (buf34, buf41, primals_1, primals_3, buf0, buf1, buf2, buf3,
buf4, buf5, buf6, buf7, primals_20, buf8, primals_24, buf10, buf12,
buf13, buf14, buf16, buf18, buf19, buf20, buf22, buf24, buf25,
buf26, buf28, buf30, buf32, buf36, buf38, reinterpret_tensor(buf40,
(4, 1, 8, 8), (64, 64, 8, 1), 0))
class SuperPointNetNew(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNetNew, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
padding=0)
self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
padding=0)
def forward(self, input_0):
primals_1 = self.conv1a.weight
primals_2 = self.conv1a.bias
primals_4 = self.conv1b.weight
primals_5 = self.conv1b.bias
primals_6 = self.conv2a.weight
primals_7 = self.conv2a.bias
primals_8 = self.conv2b.weight
primals_9 = self.conv2b.bias
primals_10 = self.conv3a.weight
primals_11 = self.conv3a.bias
primals_12 = self.conv3b.weight
primals_13 = self.conv3b.bias
primals_14 = self.conv4a.weight
primals_15 = self.conv4a.bias
primals_16 = self.conv4b.weight
primals_17 = self.conv4b.bias
primals_18 = self.convPa.weight
primals_19 = self.convPa.bias
primals_20 = self.convPb.weight
primals_21 = self.convPb.bias
primals_22 = self.convDa.weight
primals_23 = self.convDa.bias
primals_24 = self.convDb.weight
primals_25 = self.convDb.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25])
return output[0], output[1]
|
albutko/vlb
|
SuperPointNet
| false
| 6,206
|
[
"BSD-2-Clause"
] | 1
|
437245c0991948eeb36a277937a7e67d389041e4
|
https://github.com/albutko/vlb/tree/437245c0991948eeb36a277937a7e67d389041e4
|
down_shifted_conv2d
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
def down_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :xs[2] - 1, :]
pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
return pad(x)
class down_shifted_conv2d(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3),
stride=(1, 1), shift_output_down=False, norm='weight_norm'):
super(down_shifted_conv2d, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride)
self.shift_output_down = shift_output_down
self.norm = norm
self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int((
filter_size[1] - 1) / 2), filter_size[0] - 1, 0))
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_down:
self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0,
0, 1, 0)))
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
x = self.bn(x) if self.norm == 'batch_norm' else x
return self.down_shift(x) if self.shift_output_down else x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_filters_in': 4, 'num_filters_out': 4}]
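# Hedged sketch (not part of the original dataset entry): the default (2, 3)
# filter is padded one row on top and symmetrically on the sides, so every
# output pixel depends only on the current and earlier rows (the vertical-stack
# masking used in PixelCNN++), and the spatial size is preserved.
if __name__ == '__main__':
    layer = down_shifted_conv2d(4, 4)
    y = layer(torch.rand(4, 4, 4, 4))
    print(y.shape)  # expected: torch.Size([4, 4, 4, 4])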
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 5
x0 = xindex % 6
x2 = xindex // 30
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -1 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp2 & tmp4
tmp8 = tmp7 & tmp6
tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 24 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 2, 3), (24, 6, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 2, 3), (24, 6, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3,
primals_2, buf3, 4, 24, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3
def down_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :xs[2] - 1, :]
pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
return pad(x)
class down_shifted_conv2dNew(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3),
stride=(1, 1), shift_output_down=False, norm='weight_norm'):
super(down_shifted_conv2dNew, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride)
self.shift_output_down = shift_output_down
self.norm = norm
self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int((
filter_size[1] - 1) / 2), filter_size[0] - 1, 0))
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_down:
self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0,
0, 1, 0)))
def forward(self, input_0):
primals_4 = self.conv.bias
primals_2 = self.conv.weight_g
primals_3 = self.conv.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
andiac/pixel-cnn-pp
|
down_shifted_conv2d
| false
| 6,207
|
[
"MIT"
] | 1
|
3ba856320e40208cbb6e9cac3e66a739f148903e
|
https://github.com/andiac/pixel-cnn-pp/tree/3ba856320e40208cbb6e9cac3e66a739f148903e
|
ContrastiveLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
    Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise.
Code from https://github.com/adambielski/siamese-triplet"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, output1, output2, target, size_average=True):
distances = (output2 - output1).pow(2).sum(1)
losses = 0.5 * (target.float() * distances + (1 + -1 * target).
float() * F.relu(self.margin - (distances + self.eps).sqrt()).
pow(2))
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
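# Hedged worked example (not part of the original dataset entry): per pair,
# the loss is 0.5 * (t * d + (1 - t) * relu(margin - sqrt(d + eps))**2), where
# d is the squared Euclidean distance summed over dim 1.
if __name__ == '__main__':
    crit = ContrastiveLoss(margin=4)
    o1, o2 = torch.zeros(1, 2), torch.ones(1, 2)   # d == 2.0
    same = crit(o1, o2, torch.ones(1))              # 0.5 * d == 1.0
    diff = crit(o1, o2, torch.zeros(1))             # 0.5 * relu(4 - sqrt(2))**2 ~ 3.343
    print(same.item(), diff.item())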
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp0 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = 1e-09
tmp8 = tmp1 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 4.0
tmp11 = tmp10 - tmp9
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tmp13 * tmp13
tmp15 = tmp6 * tmp14
tmp16 = tmp2 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
Contrastive loss
    Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise.
Code from https://github.com/adambielski/siamese-triplet"""
def __init__(self, margin):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
anish-lu-yihe/abcpy
|
ContrastiveLoss
| false
| 6,208
|
[
"BSD-3-Clause-Clear"
] | 1
|
be58367c4d7e38ee696238e3d8405e8abe2defb7
|
https://github.com/anish-lu-yihe/abcpy/tree/be58367c4d7e38ee696238e3d8405e8abe2defb7
|
LabelSmoothingBCE
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class LabelSmoothingBCE(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingBCE, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
smooth_target = target.clone().masked_fill(target == 1, self.confidence
)
smooth_target = smooth_target.masked_fill(target == 0, self.smoothing)
return self.criterion(x, smooth_target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
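# Hedged worked example (not part of the original dataset entry): with
# smoothing=0.2, hard targets 1/0 are replaced by 0.8/0.2 before the
# element-wise BCE-with-logits loss (reduction='none', so the output keeps the
# input shape).
if __name__ == '__main__':
    crit = LabelSmoothingBCE(smoothing=0.2)
    logits = torch.tensor([2.0, -2.0])
    target = torch.tensor([1.0, 0.0])
    print(crit(logits, target))  # ~[0.527, 0.527] for smoothed targets [0.8, 0.2]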
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = tmp0 == tmp3
tmp5 = tl.where(tmp4, tmp3, tmp0)
tmp6 = tl.where(tmp2, tmp1, tmp5)
tmp7 = tmp3 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.minimum(tmp1, tmp8)
tmp11 = tl_math.abs(tmp8)
tmp12 = -tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp10 - tmp14
tmp16 = tmp9 - tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0[grid
(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
return buf0,
class LabelSmoothingBCENew(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingBCENew, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anoushkt/craftassist
|
LabelSmoothingBCE
| false
| 6,209
|
[
"MIT"
] | 1
|
c200af65e52e800f0f0cc540fe836b644383349d
|
https://github.com/anoushkt/craftassist/tree/c200af65e52e800f0f0cc540fe836b644383349d
|
SmoothL1Loss
|
import torch
import torch.utils.data
def smooth_l1_loss(input, target, beta=1.0 / 9, size_average=True):
"""
    Very similar to the smooth_l1_loss from PyTorch, but with
    the extra beta parameter.
"""
n = torch.abs(input - target)
cond = n < beta
loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
if size_average:
return loss.mean()
return loss.sum()
class SmoothL1Loss(torch.nn.Module):
def __init__(self, beta=1.0 / 9):
super(SmoothL1Loss, self).__init__()
self.beta = beta
def forward(self, input, target, size_average=True):
return smooth_l1_loss(input, target, self.beta, size_average)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
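# Hedged worked example (not part of the original dataset entry): with the
# default beta = 1/9, a residual of 0.05 falls in the quadratic region
# (0.5 * 0.05**2 / beta ~ 0.01125) while a residual of 1.0 falls in the linear
# region (1.0 - 0.5 * beta ~ 0.94444).
if __name__ == '__main__':
    crit = SmoothL1Loss()
    a = torch.tensor([0.05, 1.0])
    b = torch.zeros(2)
    print(crit(a, b, size_average=False))  # ~0.01125 + 0.94444 = 0.95569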
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_div_lt_mean_mul_pow_sub_where_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 0.1111111111111111
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = 9.0
tmp10 = tmp8 * tmp9
tmp11 = 0.05555555555555555
tmp12 = tmp3 - tmp11
tmp13 = tl.where(tmp5, tmp10, tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_div_lt_mean_mul_pow_sub_where_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def smooth_l1_loss(input, target, beta=1.0 / 9, size_average=True):
"""
    Very similar to the smooth_l1_loss from PyTorch, but with
    the extra beta parameter.
"""
n = torch.abs(input - target)
cond = n < beta
loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
if size_average:
return loss.mean()
return loss.sum()
class SmoothL1LossNew(torch.nn.Module):
def __init__(self, beta=1.0 / 9):
super(SmoothL1LossNew, self).__init__()
self.beta = beta
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anslt/retinamask
|
SmoothL1Loss
| false
| 6,210
|
[
"MIT"
] | 1
|
12b58febfd0a5ed6914796a4a3db60c2a8181370
|
https://github.com/anslt/retinamask/tree/12b58febfd0a5ed6914796a4a3db60c2a8181370
|
nin
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
class nin(nn.Module):
def __init__(self, dim_in, dim_out):
super(nin, self).__init__()
self.lin_a = wn(nn.Linear(dim_in, dim_out))
self.dim_out = dim_out
def forward(self, x):
""" a network in network layer (1x1 CONV) """
x = x.permute(0, 2, 3, 1)
shp = [int(y) for y in x.size()]
out = self.lin_a(x.contiguous().view(shp[0] * shp[1] * shp[2], shp[3]))
shp[-1] = self.dim_out
out = out.view(shp)
return out.permute(0, 3, 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
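# Hedged equivalence sketch (not part of the original dataset entry): the
# permute + Linear + permute round trip is the same computation as a 1x1
# convolution, so copying the (weight-normalized) linear weights into a Conv2d
# should reproduce the output.
if __name__ == '__main__':
    m = nin(4, 4)
    conv = nn.Conv2d(4, 4, kernel_size=1)
    with torch.no_grad():
        conv.weight.copy_(m.lin_a.weight.view(4, 4, 1, 1))
        conv.bias.copy_(m.lin_a.bias)
    x = torch.rand(4, 4, 4, 4)
    print(torch.allclose(m(x), conv(x), atol=1e-5))  # expected: True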
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(4)](primals_3, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_2[grid(16)](primals_3,
primals_2, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf3)
del primals_4
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 1, 16, 4), 0
), buf2, primals_2, primals_3, reinterpret_tensor(buf0, (64, 4), (4,
1), 0), buf1
class ninNew(nn.Module):
def __init__(self, dim_in, dim_out):
super(ninNew, self).__init__()
self.lin_a = wn(nn.Linear(dim_in, dim_out))
self.dim_out = dim_out
def forward(self, input_0):
primals_4 = self.lin_a.bias
primals_2 = self.lin_a.weight_g
primals_3 = self.lin_a.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
andiac/pixel-cnn-pp
|
nin
| false
| 6,211
|
[
"MIT"
] | 1
|
3ba856320e40208cbb6e9cac3e66a739f148903e
|
https://github.com/andiac/pixel-cnn-pp/tree/3ba856320e40208cbb6e9cac3e66a739f148903e
|
UpsamplingBlock
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class UpsamplingBlock(nn.Module):
def __init__(self, input_nc, output_nc, kernel, stride, pad):
"""
Single block of upsampling operation
Input:
- int input_nc : Input number of channels
- int output_nc : Output number of channels
- int kernel : Kernel size
- int stride : Stride length
        - int pad : Padding
"""
super(UpsamplingBlock, self).__init__()
conv = nn.Conv2d
biup = nn.Upsample
block = nn.Sequential()
block.add_module('conv_1', conv(input_nc, output_nc, kernel, stride,
pad))
block.add_module('upsample_2', biup(scale_factor=2, mode='bilinear'))
self.biup_block = block
def forward(self, x):
return self.biup_block(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'output_nc': 4, 'kernel': 4, 'stride': 1,
'pad': 4}]
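# Hedged usage sketch (not part of the original dataset entry): with kernel=4,
# stride=1, pad=4, a 4x4 input grows to 9x9 after the convolution
# (4 + 2*4 - 4 + 1 = 9) and to 18x18 after the 2x bilinear upsample.
if __name__ == '__main__':
    block = UpsamplingBlock(4, 4, kernel=4, stride=1, pad=4)
    y = block(torch.rand(4, 4, 4, 4))
    print(y.shape)  # expected: torch.Size([4, 4, 18, 18])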
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_3(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 18 % 18
x0 = xindex % 18
x6 = xindex // 324
x2 = xindex // 324 % 4
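    # the next two index expressions are computed but unused (dead code kept
    # from the compiler's tracing)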
xindex // 1296
xindex % 1296
x7 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x6), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x6), xmask,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x6), xmask,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x6), xmask,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tl.store(out_ptr2 + x7, tmp36, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = empty_strided_cuda((18, 1), (1, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(18)](buf1, 18, XBLOCK=32,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((18, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_1[grid(18)](buf2, 18, XBLOCK=32,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((18,), (1,), torch.int64)
triton_poi_fused__to_copy_0[grid(18)](buf3, 18, XBLOCK=32,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((18,), (1,), torch.int64)
triton_poi_fused_add_clamp_1[grid(18)](buf4, 18, XBLOCK=32,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((18,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(18)](buf5,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((18, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(18)](buf7,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 18, 18), (1296, 324, 18, 1), torch
.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_3[grid(5184)](
buf1, buf3, buf0, primals_2, buf4, buf5, buf2, buf7, buf9, 5184,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf9, primals_1, primals_3, buf1, buf2, buf3, buf4, buf5, buf7
class UpsamplingBlockNew(nn.Module):
def __init__(self, input_nc, output_nc, kernel, stride, pad):
"""
Single block of upsampling operation
Input:
- int input_nc : Input number of channels
- int output_nc : Output number of channels
- int kernel : Kernel size
- int stride : Stride length
        - int pad : Padding
"""
super(UpsamplingBlockNew, self).__init__()
conv = nn.Conv2d
biup = nn.Upsample
block = nn.Sequential()
block.add_module('conv_1', conv(input_nc, output_nc, kernel, stride,
pad))
block.add_module('upsample_2', biup(scale_factor=2, mode='bilinear'))
self.biup_block = block
def forward(self, input_0):
primals_1 = self.biup_block.conv_1.weight
primals_2 = self.biup_block.conv_1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
andrewjong/Guided-pix2pix
|
UpsamplingBlock
| false
| 6,212
|
[
"BSD-3-Clause"
] | 1
|
0c6a7b5fde50ad7ea4fb20a6136fc6cb6c4e5542
|
https://github.com/andrewjong/Guided-pix2pix/tree/0c6a7b5fde50ad7ea4fb20a6136fc6cb6c4e5542
|
MultiHeadAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'd_model': 4}]
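# A minimal shape sketch (illustrative): each (4, 4, 4, 4) input is projected
# per position, split into heads=4 heads of d_k=1, attended over the 16
# flattened positions, and re-projected, giving a (4, 16, 4) output.
if __name__ == '__main__':
    mha = MultiHeadAttention(heads=4, d_model=4)
    q, k, v = get_inputs()
    assert mha(q, k, v).shape == (4, 16, 4)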
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
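# triton_poi_fused_0 below adds the projection bias and applies the 1/sqrt(d_k)
# scale (1.0 here, since d_k = 1) while reshaping to a per-head layout;
# triton_per_fused_1 is the row-wise softmax over the 16 scores, with a guard
# that zeroes rows whose entries are all -inf.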
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](buf1, primals_6, buf3, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf1, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf1
triton_poi_fused_0[grid(16, 16)](buf0, primals_3, buf4, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused_1[grid(256)](buf5, buf9, 256, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf0, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf0
triton_poi_fused_2[grid(16, 16)](buf2, primals_8, buf10, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0
), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class MultiHeadAttentionNew(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_linear.weight
primals_3 = self.q_linear.bias
primals_5 = self.v_linear.weight
primals_6 = self.v_linear.bias
primals_7 = self.k_linear.weight
primals_8 = self.k_linear.bias
primals_10 = self.out.weight
primals_11 = self.out.bias
primals_1 = input_0
primals_4 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
and-smith/Vac-Scholar-Curb-GAN
|
MultiHeadAttention
| false
| 6,213
|
[
"MIT"
] | 1
|
142bd70fdf0f1cbc4a1c20c5e58fa5b6a9dbe742
|
https://github.com/and-smith/Vac-Scholar-Curb-GAN/tree/142bd70fdf0f1cbc4a1c20c5e58fa5b6a9dbe742
|
HighwayNetwork
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class HighwayNetwork(nn.Module):
def __init__(self, in_dim, out_dim):
super(HighwayNetwork, self).__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x) - 2)
lin = self.lin_proj(x)
nonlin = torch.relu(self.nonlin_proj(x))
res = gate * nonlin + (1 - gate) * lin
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
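# A minimal behavior sketch (illustrative): the biases start at 0, so the gate
# opens near sigmoid(-2) ~ 0.12 and the layer initially favors the linear path.
if __name__ == '__main__':
    hw = HighwayNetwork(in_dim=4, out_dim=4)
    x = torch.rand(4, 4, 4, 4)
    assert hw(x).shape == (4, 4, 4, 4)
    assert torch.sigmoid(hw.gate_proj(x) - 2).mean() < 0.5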
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
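# The kernel below fuses the whole highway combination into one elementwise
# pass: gate = sigmoid(gate_logits - 2), then gate * relu(nonlin) +
# (1 - gate) * lin, over all 256 output elements.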
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp10 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp3
tmp11 = tmp9 * tmp10
tmp12 = tmp7 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0[grid(256)](buf0,
buf2, buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf1, buf2
class HighwayNetworkNew(nn.Module):
def __init__(self, in_dim, out_dim):
super(HighwayNetworkNew, self).__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, input_0):
primals_1 = self.gate_proj.weight
primals_2 = self.gate_proj.bias
primals_4 = self.lin_proj.weight
primals_5 = self.lin_proj.bias
primals_6 = self.nonlin_proj.weight
primals_7 = self.nonlin_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
anoushkt/craftassist
|
HighwayNetwork
| false
| 6,214
|
[
"MIT"
] | 1
|
c200af65e52e800f0f0cc540fe836b644383349d
|
https://github.com/anoushkt/craftassist/tree/c200af65e52e800f0f0cc540fe836b644383349d
|
HighwayLayer
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
def my_xavier_init(m, gain=1):
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayer(torch.nn.Module):
def __init__(self, dim):
super(HighwayLayer, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x))
nlin = torch.tanh(self.nlin_proj(x))
res = gate * nlin + (1 - gate) * x
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
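# A minimal initialization sketch (illustrative): gate_proj.bias is set to -1,
# so the gate starts near sigmoid(-1) ~ 0.27 and the (1 - gate) * x residual
# path dominates, keeping the layer close to the identity at initialization.
if __name__ == '__main__':
    layer = HighwayLayer(dim=4)
    x = torch.rand(4, 4, 4, 4)
    assert layer(x).shape == x.shape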
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
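# The kernel below fuses the highway mix into one elementwise pass:
# sigmoid(gate) * tanh(nlin) + (1 - sigmoid(gate)) * x.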
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, buf1,
primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
def my_xavier_init(m, gain=1):
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayerNew(torch.nn.Module):
def __init__(self, dim):
super(HighwayLayerNew, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, input_0):
primals_1 = self.gate_proj.weight
primals_2 = self.gate_proj.bias
primals_4 = self.nlin_proj.weight
primals_5 = self.nlin_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
anoushkt/craftassist
|
HighwayLayer
| false
| 6,215
|
[
"MIT"
] | 1
|
c200af65e52e800f0f0cc540fe836b644383349d
|
https://github.com/anoushkt/craftassist/tree/c200af65e52e800f0f0cc540fe836b644383349d
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
self.smooth = 1.0
def forward(self, y_pred, y_true):
assert y_pred.size() == y_true.size()
y_pred = y_pred[:, 0].contiguous().view(-1)
y_true = y_true[:, 0].contiguous().view(-1)
intersection = (y_pred * y_true).sum()
dsc = (2.0 * intersection + self.smooth) / (y_pred.sum() + y_true.
sum() + self.smooth)
return 1.0 - dsc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
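# A worked check (illustrative, binary masks): with a perfect prediction the
# intersection equals both sums, so dsc = (2*I + 1) / (2*I + 1) = 1 and the
# loss is 0; the smooth term keeps the ratio defined even for empty masks.
if __name__ == '__main__':
    criterion = DiceLoss()
    mask = (torch.rand(4, 4, 4, 4) > 0.5).float()
    assert torch.isclose(criterion(mask, mask), torch.tensor(0.0))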
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
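# Single-program reduction: gathers the 64 channel-0 elements of each input,
# accumulates sum(y_pred * y_true), sum(y_pred) and sum(y_true), and computes
# 1 - dice in-kernel.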
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None)
tmp1 = tl.load(in_ptr1 + (64 * (r0 // 16) + r0 % 16), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
self.smooth = 1.0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anudeepsekhar/Lane-Detection-Pytorch
|
DiceLoss
| false
| 6,216
|
[
"MIT"
] | 1
|
cfddda8a0768cf83afd87e29d605fd58aa89df59
|
https://github.com/anudeepsekhar/Lane-Detection-Pytorch/tree/cfddda8a0768cf83afd87e29d605fd58aa89df59
|
MixtureSoftmax
|
import torch
import torch.nn as nn
def project_simplex(x):
"""
    Project an arbitrary vector onto the simplex.
    See [Wang & Carreira-Perpiñán 2013] for a description and references.
"""
n = x.size()[0]
mu = torch.sort(x, 0, descending=True)[0]
sm = 0
    for j in range(1, n + 1):
sm += mu[j - 1]
t = mu[j - 1] - 1.0 / j * (sm - 1)
if t > 0:
row = j
sm_row = sm
theta = 1.0 / row * (sm_row - 1)
y = x - theta
return torch.clamp(y, min=0.0)
class MixtureSoftmax(nn.Module):
"""
"""
def __init__(self, in_features):
super(MixtureSoftmax, self).__init__()
self.weight = nn.Parameter(torch.Tensor(in_features))
self.reset_weights()
def reset_weights(self):
self.weight.data.normal_(10, 1.0)
self.weight.data /= self.weight.data.sum()
def forward(self, input):
return torch.sum(input * self.weight[None, :, None], dim=1)
def project_parameters(self):
for p in self.parameters():
p.data = project_simplex(p.data)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
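# A sanity sketch for project_simplex (illustrative): the projection of any
# vector should be nonnegative and sum to 1, i.e. land on the probability
# simplex (Wang & Carreira-Perpiñán 2013).
if __name__ == '__main__':
    p = project_simplex(torch.randn(5))
    assert (p >= 0).all()
    assert torch.isclose(p.sum(), torch.tensor(1.0), atol=1e-6)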
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
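# Fuses the broadcast multiply by self.weight with the reduction over dim 1:
# each of the 64 output elements sums four strided loads (stride 16) scaled
# by the weight entry selected by the weight[None, :, None] broadcast.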
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp6 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + x4, tmp11, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](primals_2, primals_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
return buf0, primals_2
def project_simplex(x):
"""
    Project an arbitrary vector onto the simplex.
    See [Wang & Carreira-Perpiñán 2013] for a description and references.
"""
n = x.size()[0]
mu = torch.sort(x, 0, descending=True)[0]
sm = 0
    for j in range(1, n + 1):
sm += mu[j - 1]
t = mu[j - 1] - 1.0 / j * (sm - 1)
if t > 0:
row = j
sm_row = sm
theta = 1.0 / row * (sm_row - 1)
y = x - theta
return torch.clamp(y, min=0.0)
class MixtureSoftmaxNew(nn.Module):
"""
"""
def __init__(self, in_features):
super(MixtureSoftmaxNew, self).__init__()
self.weight = nn.Parameter(torch.Tensor(in_features))
self.reset_weights()
def reset_weights(self):
self.weight.data.normal_(10, 1.0)
self.weight.data /= self.weight.data.sum()
def project_parameters(self):
for p in self.parameters():
p.data = project_simplex(p.data)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
anuar12/deep_game_theory
|
MixtureSoftmax
| false
| 6,217
|
[
"MIT"
] | 1
|
1debe5a498fe5f017f2791965a5e529b0dfb0529
|
https://github.com/anuar12/deep_game_theory/tree/1debe5a498fe5f017f2791965a5e529b0dfb0529
|
vggUpconv
|
import torch
import torch.nn as nn
class vggUpconv(nn.Module):
"""Some Information about vggUpconv"""
def __init__(self, in_ch, out_ch, upsample=True):
super(vggUpconv, self).__init__()
if upsample:
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
else:
self.upsample = nn.Conv2d(in_ch, in_ch, 3, 1, 1)
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
def forward(self, x1, x2):
x1 = self.upsample(x1)
x1 = self.conv1(x1)
        out = x1 + x2
        return out
def get_inputs():
return [torch.rand([4, 4, 8, 8]), torch.rand([4, 4, 16, 16])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
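# A minimal shape sketch (illustrative): x1 is upsampled 2x from 8x8 to 16x16,
# the 3x3 conv with padding 1 preserves the spatial size, and the result is
# added to the 16x16 skip tensor x2.
if __name__ == '__main__':
    up = vggUpconv(in_ch=4, out_ch=4)
    x1, x2 = get_inputs()
    assert up(x1, x2).shape == (4, 4, 16, 16)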
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
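# The first kernel below performs the full 2x bilinear upsample in one
# elementwise pass (source-index math, four gathers, and the two-axis lerp);
# the second fuses the conv bias with the skip-tensor addition.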
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 7, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + 8 * tmp12 + 64 * x2), None,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + 8 * tmp12 + 64 * x2), None,
eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + 8 * tmp8 + 64 * x2), None,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + 8 * tmp8 + 64 * x2), None,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, None)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 16, 16), (1024, 256, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(4096)](buf2, primals_1, 4096, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 16, 16), (1024, 256, 16, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_1[grid(4096)](buf4, primals_3,
primals_4, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
del primals_4
return buf4, primals_2, buf2
class vggUpconvNew(nn.Module):
"""Some Information about vggUpconv"""
def __init__(self, in_ch, out_ch, upsample=True):
super(vggUpconvNew, self).__init__()
if upsample:
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
else:
self.upsample = nn.Conv2d(in_ch, in_ch, 3, 1, 1)
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
def forward(self, input_0, input_1):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
anudeepsekhar/Lane-Detection-Pytorch
|
vggUpconv
| false
| 6,218
|
[
"MIT"
] | 1
|
cfddda8a0768cf83afd87e29d605fd58aa89df59
|
https://github.com/anudeepsekhar/Lane-Detection-Pytorch/tree/cfddda8a0768cf83afd87e29d605fd58aa89df59
|
MCCRLoss
|
import torch
from torch import nn
class MCCRLoss(nn.Module):
"""Maximum Correntropy Criterion Induced Losses for Regression(MCCR) Loss"""
def __init__(self, sigma=1.0):
super().__init__()
assert sigma > 0
self.sigma2 = sigma ** 2
def forward(self, _input: 'torch.Tensor', _target: 'torch.Tensor'
) ->torch.Tensor:
"""
Implement maximum correntropy criterion for regression
loss(y, t) = sigma^2 * (1.0 - exp(-(y-t)^2/sigma^2))
where sigma > 0 (parameter)
Reference:
* Feng, Yunlong, et al.
"Learning with the maximum correntropy criterion
induced losses for regression."
J. Mach. Learn. Res. 16.1 (2015): 993-1034.
"""
return torch.mean(self.sigma2 * (1 - torch.exp(-(_input - _target) **
2 / self.sigma2)))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
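# A worked check (illustrative): for small residuals r the loss behaves like
# the squared error, sigma^2 * (1 - exp(-r^2 / sigma^2)) ~ r^2, while for
# large residuals it saturates at sigma^2, which is what makes MCCR robust
# to outliers.
if __name__ == '__main__':
    criterion = MCCRLoss(sigma=1.0)
    target = torch.zeros(4)
    small = criterion(torch.full((4,), 0.01), target)   # ~ 0.01**2 = 1e-4
    large = criterion(torch.full((4,), 100.0), target)  # saturates at 1.0
    assert abs(small.item() - 1e-4) < 1e-5
    assert abs(large.item() - 1.0) < 1e-6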
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
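# Single-program reduction: evaluates sigma^2 * (1 - exp(-(a - b)^2 / sigma^2))
# elementwise (sigma = 1, so sigma^2 folds into the 1.0 constants) and divides
# the 256-element sum by 256.0 for the mean.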
@triton.jit
def triton_per_fused_div_exp_mean_mul_neg_pow_rsub_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -tmp3
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 - tmp7
tmp9 = tmp8 * tmp5
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_exp_mean_mul_neg_pow_rsub_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MCCRLossNew(nn.Module):
"""Maximum Correntropy Criterion Induced Losses for Regression(MCCR) Loss"""
def __init__(self, sigma=1.0):
super().__init__()
assert sigma > 0
self.sigma2 = sigma ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
appleparan/mise.py
|
MCCRLoss
| false
| 6,219
|
[
"MIT"
] | 1
|
a77ea51be37a739928600c66d168d69b78bc0c4b
|
https://github.com/appleparan/mise.py/tree/a77ea51be37a739928600c66d168d69b78bc0c4b
|
OrMixer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MlpBlock(nn.Module):
def __init__(self, features, hidden_dim):
super().__init__()
self.hidden_dim = hidden_dim
self.features = features
self.fc1 = nn.Linear(self.features, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, int(self.features))
def forward(self, x):
y = self.fc1(x)
y = F.gelu(y)
return self.fc2(y)
class MixerBlock(nn.Module):
"""Mixer block layer"""
def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim,
channels_mlp_dim):
super().__init__()
self.tokens_mlp_dim = tokens_mlp_dim
self.tokens_dim = tokens_dim
self.channels_mlp_dim = channels_mlp_dim
self.channels_dim = channels_dim
self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim)
self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim)
def forward(self, x):
y = F.layer_norm(x, x.shape[1:])
y = torch.transpose(y, 1, 2)
y = self.token_mixer(y)
y = torch.transpose(y, 1, 2)
x = x + y
y = F.layer_norm(x, x.shape[1:])
return x + self.channel_mixer(y)
class MlpMixer(nn.Module):
"""Mixer architecture"""
def __init__(self, patches, feature_length, num_classes, num_blocks,
hidden_dim, tokens_mlp_dim, channels_mlp_dim):
super().__init__()
self.patches = patches
self.feature_length = feature_length
self.num_classes = num_classes
self.num_blocks = num_blocks
self.hidden_dim = hidden_dim
self.tokens_mlp_dim = tokens_mlp_dim
self.channels_mlp_dim = channels_mlp_dim
self.conv = nn.Conv1d(in_channels=self.patches, out_channels=self.
hidden_dim, kernel_size=1)
for nb in range(self.num_blocks):
setattr(self, 'mixerBlock_{}'.format(nb), MixerBlock(self.
feature_length, self.tokens_mlp_dim, self.hidden_dim, self.
channels_mlp_dim))
self.fc = nn.Linear(in_features=self.feature_length, out_features=
self.num_classes)
def forward(self, inputs):
x = torch.transpose(inputs, 1, 2)
x = self.conv(x)
x = torch.transpose(x, 1, 2)
for nb in range(self.num_blocks):
x = getattr(self, 'mixerBlock_{}'.format(nb))(x)
x = F.layer_norm(x, x.shape[1:])
x = torch.transpose(x, 1, 2)
x = torch.mean(x, dim=1)
return self.fc(x)
class OrMixer(nn.Module):
def __init__(self, n_layers, entity_dim):
super(OrMixer, self).__init__()
self.mlpMixer = MlpMixer(patches=2, feature_length=entity_dim,
num_classes=entity_dim, num_blocks=3, hidden_dim=20,
tokens_mlp_dim=200, channels_mlp_dim=4)
def forward(self, x1, x2):
x1 = x1.unsqueeze(-1)
x2 = x2.unsqueeze(-1)
x = torch.cat((x1, x2), dim=-1)
x = self.mlpMixer(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1, 'entity_dim': 4}]
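# A minimal shape sketch (illustrative): the two (4, 4) entity embeddings are
# stacked into a (4, 4, 2) tensor, lifted to hidden_dim=20 channels by the 1x1
# conv, passed through 3 MixerBlocks, mean-pooled, and projected back to
# entity_dim=4.
if __name__ == '__main__':
    mixer = OrMixer(n_layers=1, entity_dim=4)
    x1, x2 = get_inputs()
    assert mixer(x1, x2).shape == (4, 4)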
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
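# triton_poi_fused_cat_0 below interleaves the two (4, 4) inputs into the
# concatenated (4, 4, 2) tensor (x0 selects between the two sources along the
# last dim of size 2).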
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + x1, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 8 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_clone_convolution_native_layer_norm_2(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 4
rnumel = 80
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex
r2 = rindex // 4
tmp0 = tl.load(in_out_ptr0 + (r3 + 80 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last', other=0.0
)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 80, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(rmask & xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 80.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tl.store(in_out_ptr0 + (r3 + 80 * x0), tmp2, rmask & xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp23, xmask)
tl.store(out_ptr1 + (r3 + 80 * x0), tmp25, rmask & xmask)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_gelu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_layer_norm_native_layer_norm_backward_4(in_ptr0
, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 80
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex
r1 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r3 + 80 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + 80 * x0), rmask & xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0
)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(rmask & xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 80, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(rmask & xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 80.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = 0.0125
tmp27 = tmp25 * tmp26
tl.store(out_ptr2 + x0, tmp27, xmask)
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp20, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 80
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 20
y0 = yindex % 20
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y1, ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + y1, ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 80.0
tmp9 = tmp7 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp6 * tmp12
tl.store(out_ptr0 + (y0 + 20 * x2 + 80 * y1), tmp13, xmask & ymask)
@triton.jit
def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 80
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 20
y1 = yindex // 20
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + 20 * x2 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_8(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 80
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 80 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 80, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 80.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = 0.0125
tmp23 = tmp21 * tmp22
tl.store(out_ptr2 + x0, tmp23, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 20
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 80.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tl.store(out_ptr0 + (x2 + 20 * y3), tmp9, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 80
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 20
y1 = yindex // 20
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 20 * x2 + 80 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_mean_native_layer_norm_11(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 20
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 80 * x1), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 80.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(rmask & xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = 20.0
tmp15 = tmp13 / tmp14
tl.store(out_ptr0 + (r2 + 20 * x3), tmp9, rmask & xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (20, 2, 1), (2, 1, 1))
assert_size_stride(primals_4, (20,), (1,))
assert_size_stride(primals_5, (200, 4), (4, 1))
assert_size_stride(primals_6, (200,), (1,))
assert_size_stride(primals_7, (4, 200), (200, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 20), (20, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (20, 4), (4, 1))
assert_size_stride(primals_12, (20,), (1,))
assert_size_stride(primals_13, (200, 4), (4, 1))
assert_size_stride(primals_14, (200,), (1,))
assert_size_stride(primals_15, (4, 200), (200, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 20), (20, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (20, 4), (4, 1))
assert_size_stride(primals_20, (20,), (1,))
assert_size_stride(primals_21, (200, 4), (4, 1))
assert_size_stride(primals_22, (200,), (1,))
assert_size_stride(primals_23, (4, 200), (200, 1))
assert_size_stride(primals_24, (4,), (1,))
assert_size_stride(primals_25, (4, 20), (20, 1))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (20, 4), (4, 1))
assert_size_stride(primals_28, (20,), (1,))
assert_size_stride(primals_29, (4, 4), (4, 1))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
triton_poi_fused_convolution_1[grid(8, 4)](buf0, buf1, 8, 4, XBLOCK
=4, YBLOCK=8, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 4), (80, 4, 1))
del buf1
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0)
del buf5
buf8 = empty_strided_cuda((4, 20, 4), (80, 4, 1), torch.float32)
triton_per_fused_clone_convolution_native_layer_norm_2[grid(4)](buf3,
buf7, primals_4, buf4, buf8, 4, 80, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_4
buf9 = empty_strided_cuda((80, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (80, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 200), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 20, 200), (4000, 200, 1), torch.float32)
triton_poi_fused_add_gelu_3[grid(16000)](buf9, primals_6, buf10,
16000, XBLOCK=256, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((80, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (80, 200), (200, 1), 0),
reinterpret_tensor(primals_7, (200, 4), (1, 200), 0), out=buf11)
buf12 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf64 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_4[
grid(4)](buf3, buf11, primals_8, buf12, buf13, buf64, 4, 80,
XBLOCK=1, num_warps=2, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 20), (80, 20, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(80, 4)](buf3, buf11,
primals_8, buf12, buf13, buf15, 80, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 20),
(20, 1), 0), reinterpret_tensor(primals_9, (20, 4), (1, 20), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_6[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 20), (1, 4), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 20), (80, 1, 4), torch.float32)
triton_poi_fused_add_7[grid(80, 4)](buf3, buf11, primals_8, buf18,
primals_12, buf19, 80, 4, XBLOCK=4, YBLOCK=32, num_warps=4,
num_stages=1)
del primals_12
del primals_8
buf20 = buf13
del buf13
buf21 = buf12
del buf12
buf63 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_8[grid(4)
](buf19, buf20, buf21, buf63, 4, 80, XBLOCK=1, num_warps=2,
num_stages=1)
buf23 = reinterpret_tensor(buf18, (4, 4, 20), (80, 20, 1), 0)
del buf18
triton_poi_fused_native_layer_norm_9[grid(16, 20)](buf19, buf20,
buf21, buf23, 16, 20, XBLOCK=32, YBLOCK=16, num_warps=4,
num_stages=1)
buf24 = reinterpret_tensor(buf11, (4, 20, 4), (80, 4, 1), 0)
del buf11
triton_poi_fused_clone_10[grid(80, 4)](buf23, buf24, 80, 4, XBLOCK=
4, YBLOCK=32, num_warps=4, num_stages=1)
buf25 = empty_strided_cuda((80, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (80, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 200), (1, 4), 0), out=buf25)
buf26 = empty_strided_cuda((4, 20, 200), (4000, 200, 1), torch.float32)
triton_poi_fused_add_gelu_3[grid(16000)](buf25, primals_14, buf26,
16000, XBLOCK=256, num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((80, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (80, 200), (200, 1), 0),
reinterpret_tensor(primals_15, (200, 4), (1, 200), 0), out=buf27)
buf28 = buf21
del buf21
buf29 = buf20
del buf20
buf62 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_4[
grid(4)](buf19, buf27, primals_16, buf28, buf29, buf62, 4, 80,
XBLOCK=1, num_warps=2, num_stages=1)
buf31 = empty_strided_cuda((4, 4, 20), (80, 20, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(80, 4)](buf19, buf27,
primals_16, buf28, buf29, buf31, 80, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
buf32 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_18, reinterpret_tensor(buf31, (16, 20),
(20, 1), 0), reinterpret_tensor(primals_17, (20, 4), (1, 20), 0
), alpha=1, beta=1, out=buf32)
del primals_18
buf33 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_6[grid(64)](buf32, buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf33, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_19, (4, 20), (1, 4), 0), out=buf34)
buf35 = empty_strided_cuda((4, 4, 20), (80, 1, 4), torch.float32)
triton_poi_fused_add_7[grid(80, 4)](buf19, buf27, primals_16, buf34,
primals_20, buf35, 80, 4, XBLOCK=4, YBLOCK=32, num_warps=4,
num_stages=1)
del primals_16
del primals_20
buf36 = buf29
del buf29
buf37 = buf28
del buf28
buf61 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_8[grid(4)
](buf35, buf36, buf37, buf61, 4, 80, XBLOCK=1, num_warps=2,
num_stages=1)
buf39 = reinterpret_tensor(buf34, (4, 4, 20), (80, 20, 1), 0)
del buf34
triton_poi_fused_native_layer_norm_9[grid(16, 20)](buf35, buf36,
buf37, buf39, 16, 20, XBLOCK=32, YBLOCK=16, num_warps=4,
num_stages=1)
buf40 = reinterpret_tensor(buf27, (4, 20, 4), (80, 4, 1), 0)
del buf27
triton_poi_fused_clone_10[grid(80, 4)](buf39, buf40, 80, 4, XBLOCK=
4, YBLOCK=32, num_warps=4, num_stages=1)
buf41 = empty_strided_cuda((80, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf40, (80, 4), (4, 1), 0),
reinterpret_tensor(primals_21, (4, 200), (1, 4), 0), out=buf41)
buf42 = empty_strided_cuda((4, 20, 200), (4000, 200, 1), torch.float32)
triton_poi_fused_add_gelu_3[grid(16000)](buf41, primals_22, buf42,
16000, XBLOCK=256, num_warps=4, num_stages=1)
buf43 = reinterpret_tensor(buf19, (80, 4), (4, 1), 0)
del buf19
extern_kernels.mm(reinterpret_tensor(buf42, (80, 200), (200, 1), 0),
reinterpret_tensor(primals_23, (200, 4), (1, 200), 0), out=buf43)
buf44 = buf37
del buf37
buf45 = buf36
del buf36
buf60 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_4[
grid(4)](buf35, buf43, primals_24, buf44, buf45, buf60, 4, 80,
XBLOCK=1, num_warps=2, num_stages=1)
buf47 = empty_strided_cuda((4, 4, 20), (80, 20, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(80, 4)](buf35, buf43,
primals_24, buf44, buf45, buf47, 80, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
buf48 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_26, reinterpret_tensor(buf47, (16, 20),
(20, 1), 0), reinterpret_tensor(primals_25, (20, 4), (1, 20), 0
), alpha=1, beta=1, out=buf48)
del primals_26
buf49 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_6[grid(64)](buf48, buf49, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf50 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf49, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 20), (1, 4), 0), out=buf50)
buf51 = empty_strided_cuda((4, 4, 20), (80, 1, 4), torch.float32)
triton_poi_fused_add_7[grid(80, 4)](buf35, buf43, primals_24, buf50,
primals_28, buf51, 80, 4, XBLOCK=4, YBLOCK=32, num_warps=4,
num_stages=1)
del buf35
del buf43
del primals_24
del primals_28
buf52 = buf45
del buf45
buf53 = buf44
del buf44
buf59 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_8[grid(4)
](buf51, buf52, buf53, buf59, 4, 80, XBLOCK=1, num_warps=2,
num_stages=1)
buf55 = reinterpret_tensor(buf50, (4, 4, 20), (80, 20, 1), 0)
del buf50
buf56 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf57 = buf56
del buf56
triton_per_fused_mean_native_layer_norm_11[grid(16)](buf57, buf51,
buf52, buf53, buf55, 16, 20, XBLOCK=1, num_warps=2, num_stages=1)
del buf51
del buf52
del buf53
buf58 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_30, buf57, reinterpret_tensor(
primals_29, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf58)
del primals_30
return (buf58, primals_3, primals_6, primals_14, primals_22,
reinterpret_tensor(buf0, (4, 2, 4), (8, 1, 2), 0), buf3, buf4, buf7,
reinterpret_tensor(buf8, (80, 4), (4, 1), 0), buf9,
reinterpret_tensor(buf10, (80, 200), (200, 1), 0), buf15, buf16,
reinterpret_tensor(buf17, (16, 4), (4, 1), 0), buf23,
reinterpret_tensor(buf24, (80, 4), (4, 1), 0), buf25,
reinterpret_tensor(buf26, (80, 200), (200, 1), 0), buf31, buf32,
reinterpret_tensor(buf33, (16, 4), (4, 1), 0), buf39,
reinterpret_tensor(buf40, (80, 4), (4, 1), 0), buf41,
reinterpret_tensor(buf42, (80, 200), (200, 1), 0), buf47, buf48,
reinterpret_tensor(buf49, (16, 4), (4, 1), 0), buf55, buf57,
primals_29, buf59, primals_27, primals_25, buf60, primals_23,
primals_21, buf61, primals_19, primals_17, buf62, primals_15,
primals_13, buf63, primals_11, primals_9, buf64, primals_7, primals_5)
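# Note: the wrapper above leans on reinterpret_tensor to create zero-copy
# views with a new (size, stride, offset) over existing storage. A minimal
# sketch of the same effect using the public as_strided API (assuming only
# the documented view semantics, not Inductor's internal guards); the
# helper name is illustrative only:
def _reinterpret_sketch():
    t = torch.arange(80, dtype=torch.float32)  # flat 80-element storage
    view = torch.as_strided(t, (4, 20), (20, 1), 0)  # new shape, same memory
    assert view.data_ptr() == t.data_ptr()  # no copy was made
    return view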
class MlpBlock(nn.Module):
def __init__(self, features, hidden_dim):
super().__init__()
self.hidden_dim = hidden_dim
self.features = features
self.fc1 = nn.Linear(self.features, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, int(self.features))
def forward(self, x):
y = self.fc1(x)
y = F.gelu(y)
return self.fc2(y)
class MixerBlock(nn.Module):
"""Mixer block layer"""
def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim,
channels_mlp_dim):
super().__init__()
self.tokens_mlp_dim = tokens_mlp_dim
self.tokens_dim = tokens_dim
self.channels_mlp_dim = channels_mlp_dim
self.channels_dim = channels_dim
self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim)
self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim)
    def forward(self, x):
        # Token mixing: normalise, transpose so the MLP runs across tokens.
        y = F.layer_norm(x, x.shape[1:])
        y = torch.transpose(y, 1, 2)
        y = self.token_mixer(y)
        y = torch.transpose(y, 1, 2)
        x = x + y  # residual connection
        # Channel mixing: the second MLP runs across channels, also residual.
        y = F.layer_norm(x, x.shape[1:])
        return x + self.channel_mixer(y)
class MlpMixer(nn.Module):
"""Mixer architecture"""
def __init__(self, patches, feature_length, num_classes, num_blocks,
hidden_dim, tokens_mlp_dim, channels_mlp_dim):
super().__init__()
self.patches = patches
self.feature_length = feature_length
self.num_classes = num_classes
self.num_blocks = num_blocks
self.hidden_dim = hidden_dim
self.tokens_mlp_dim = tokens_mlp_dim
self.channels_mlp_dim = channels_mlp_dim
        self.conv = nn.Conv1d(in_channels=self.patches,
                              out_channels=self.hidden_dim, kernel_size=1)
        for nb in range(self.num_blocks):
            setattr(self, 'mixerBlock_{}'.format(nb),
                    MixerBlock(self.feature_length, self.tokens_mlp_dim,
                               self.hidden_dim, self.channels_mlp_dim))
        self.fc = nn.Linear(in_features=self.feature_length,
                            out_features=self.num_classes)
def forward(self, inputs):
x = torch.transpose(inputs, 1, 2)
x = self.conv(x)
x = torch.transpose(x, 1, 2)
for nb in range(self.num_blocks):
x = getattr(self, 'mixerBlock_{}'.format(nb))(x)
x = F.layer_norm(x, x.shape[1:])
x = torch.transpose(x, 1, 2)
x = torch.mean(x, dim=1)
return self.fc(x)
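# Shape trace for the configuration OrMixerNew uses below (patches=2,
# hidden_dim=20, tokens_mlp_dim=200, channels_mlp_dim=4, num_blocks=3).
# A minimal sketch assuming entity_dim=4 and batch size 4, matching the
# buffer shapes in the compiled call above; the helper name is illustrative.
def _mixer_shape_trace():
    mixer = MlpMixer(patches=2, feature_length=4, num_classes=4,
                     num_blocks=3, hidden_dim=20, tokens_mlp_dim=200,
                     channels_mlp_dim=4)
    x = torch.rand(4, 4, 2)  # (batch, feature_length, patches)
    # transpose -> (4, 2, 4); conv1d -> (4, 20, 4); transpose -> (4, 4, 20);
    # each mixer block preserves (4, 4, 20); after the final transpose the
    # mean over dim=1 gives (4, 4); fc maps (4, 4) -> (4, 4).
    out = mixer(x)
    assert out.shape == (4, 4)
    return out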
class OrMixerNew(nn.Module):
def __init__(self, n_layers, entity_dim):
super(OrMixerNew, self).__init__()
self.mlpMixer = MlpMixer(patches=2, feature_length=entity_dim,
num_classes=entity_dim, num_blocks=3, hidden_dim=20,
tokens_mlp_dim=200, channels_mlp_dim=4)
def forward(self, input_0, input_1):
primals_3 = self.mlpMixer.conv.weight
primals_4 = self.mlpMixer.conv.bias
primals_5 = self.mlpMixer.mixerBlock_0.token_mixer.fc1.weight
primals_6 = self.mlpMixer.mixerBlock_0.token_mixer.fc1.bias
primals_7 = self.mlpMixer.mixerBlock_0.token_mixer.fc2.weight
primals_8 = self.mlpMixer.mixerBlock_0.token_mixer.fc2.bias
primals_9 = self.mlpMixer.mixerBlock_0.channel_mixer.fc1.weight
primals_10 = self.mlpMixer.mixerBlock_0.channel_mixer.fc1.bias
primals_11 = self.mlpMixer.mixerBlock_0.channel_mixer.fc2.weight
primals_12 = self.mlpMixer.mixerBlock_0.channel_mixer.fc2.bias
primals_13 = self.mlpMixer.mixerBlock_1.token_mixer.fc1.weight
primals_14 = self.mlpMixer.mixerBlock_1.token_mixer.fc1.bias
primals_15 = self.mlpMixer.mixerBlock_1.token_mixer.fc2.weight
primals_16 = self.mlpMixer.mixerBlock_1.token_mixer.fc2.bias
primals_17 = self.mlpMixer.mixerBlock_1.channel_mixer.fc1.weight
primals_18 = self.mlpMixer.mixerBlock_1.channel_mixer.fc1.bias
primals_19 = self.mlpMixer.mixerBlock_1.channel_mixer.fc2.weight
primals_20 = self.mlpMixer.mixerBlock_1.channel_mixer.fc2.bias
primals_21 = self.mlpMixer.mixerBlock_2.token_mixer.fc1.weight
primals_22 = self.mlpMixer.mixerBlock_2.token_mixer.fc1.bias
primals_23 = self.mlpMixer.mixerBlock_2.token_mixer.fc2.weight
primals_24 = self.mlpMixer.mixerBlock_2.token_mixer.fc2.bias
primals_25 = self.mlpMixer.mixerBlock_2.channel_mixer.fc1.weight
primals_26 = self.mlpMixer.mixerBlock_2.channel_mixer.fc1.bias
primals_27 = self.mlpMixer.mixerBlock_2.channel_mixer.fc2.weight
primals_28 = self.mlpMixer.mixerBlock_2.channel_mixer.fc2.bias
primals_1 = self.mlpMixer.fc.weight
primals_30 = self.mlpMixer.fc.bias
primals_2 = input_0
primals_29 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0]
|
amayuelas/NNKGReasoning
|
OrMixer
| false
| 6,220
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
MlpMixer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MlpBlock(nn.Module):
def __init__(self, features, hidden_dim):
super().__init__()
self.hidden_dim = hidden_dim
self.features = features
self.fc1 = nn.Linear(self.features, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, int(self.features))
def forward(self, x):
y = self.fc1(x)
y = F.gelu(y)
return self.fc2(y)
class MixerBlock(nn.Module):
"""Mixer block layer"""
def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim,
channels_mlp_dim):
super().__init__()
self.tokens_mlp_dim = tokens_mlp_dim
self.tokens_dim = tokens_dim
self.channels_mlp_dim = channels_mlp_dim
self.channels_dim = channels_dim
self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim)
self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim)
def forward(self, x):
y = F.layer_norm(x, x.shape[1:])
y = torch.transpose(y, 1, 2)
y = self.token_mixer(y)
y = torch.transpose(y, 1, 2)
x = x + y
y = F.layer_norm(x, x.shape[1:])
return x + self.channel_mixer(y)
class MlpMixer(nn.Module):
"""Mixer architecture"""
def __init__(self, patches, feature_length, num_classes, num_blocks,
hidden_dim, tokens_mlp_dim, channels_mlp_dim):
super().__init__()
self.patches = patches
self.feature_length = feature_length
self.num_classes = num_classes
self.num_blocks = num_blocks
self.hidden_dim = hidden_dim
self.tokens_mlp_dim = tokens_mlp_dim
self.channels_mlp_dim = channels_mlp_dim
        self.conv = nn.Conv1d(in_channels=self.patches,
                              out_channels=self.hidden_dim, kernel_size=1)
        for nb in range(self.num_blocks):
            setattr(self, 'mixerBlock_{}'.format(nb),
                    MixerBlock(self.feature_length, self.tokens_mlp_dim,
                               self.hidden_dim, self.channels_mlp_dim))
        self.fc = nn.Linear(in_features=self.feature_length,
                            out_features=self.num_classes)
def forward(self, inputs):
x = torch.transpose(inputs, 1, 2)
x = self.conv(x)
x = torch.transpose(x, 1, 2)
for nb in range(self.num_blocks):
x = getattr(self, 'mixerBlock_{}'.format(nb))(x)
x = F.layer_norm(x, x.shape[1:])
x = torch.transpose(x, 1, 2)
x = torch.mean(x, dim=1)
return self.fc(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'patches': 4, 'feature_length': 4, 'num_classes': 4,
'num_blocks': 4, 'hidden_dim': 4, 'tokens_mlp_dim': 4,
'channels_mlp_dim': 4}]
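# A minimal smoke test, assuming the harness helpers above: with four
# mixer blocks and every dimension equal to 4, a (4, 4, 4) input maps to
# (4, 4) logits. The helper name is illustrative only.
def _mixer_smoke_test():
    _, kwargs = get_init_inputs()
    model = MlpMixer(**kwargs)
    out = model(*get_inputs())
    assert out.shape == (4, 4)
    return out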
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_clone_convolution_native_layer_norm_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex
r2 = rindex // 4
tmp0 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp23, xmask)
tl.store(out_ptr1 + (r3 + 16 * x0), tmp25, xmask)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_layer_norm_native_layer_norm_backward_3(in_ptr0
, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex
r1 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = 0.0625
tmp27 = tmp25 * tmp26
tl.store(out_ptr2 + x0, tmp27, xmask)
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp20, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y1, ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + y1, ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 16.0
tmp9 = tmp7 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp6 * tmp12
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp13, xmask & ymask)
@triton.jit
def triton_poi_fused_gelu_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
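# The kernel above is the exact (erf-based) GELU:
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# with 0.7071067811865476 = 1/sqrt(2). A minimal host-side sketch checking
# the same formula against PyTorch's default erf-based GELU; the helper
# name is illustrative only.
def _gelu_formula_check():
    x = torch.randn(64)
    manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
    assert torch.allclose(manual, torch.nn.functional.gelu(x), atol=1e-6)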
@triton.jit
def triton_poi_fused_add_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_7(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = 0.0625
tmp23 = tmp21 * tmp22
tl.store(out_ptr2 + x0, tmp23, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tl.store(out_ptr0 + (x2 + 4 * y3), tmp9, xmask & ymask)
@triton.jit
def triton_poi_fused_mean_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4, 4), (4, 1))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4, 4), (4, 1))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (4, 4), (4, 1))
assert_size_stride(primals_27, (4,), (1,))
assert_size_stride(primals_28, (4, 4), (4, 1))
assert_size_stride(primals_29, (4,), (1,))
assert_size_stride(primals_30, (4, 4), (4, 1))
assert_size_stride(primals_31, (4,), (1,))
assert_size_stride(primals_32, (4, 4), (4, 1))
assert_size_stride(primals_33, (4,), (1,))
assert_size_stride(primals_34, (4, 4), (4, 1))
assert_size_stride(primals_35, (4,), (1,))
assert_size_stride(primals_36, (4, 4), (4, 1))
assert_size_stride(primals_37, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf6 = reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0)
del buf4
buf7 = buf0
del buf0
triton_per_fused_clone_convolution_native_layer_norm_1[grid(4)](buf2,
buf6, primals_3, buf3, buf7, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_3
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_2[grid(64)](buf8, primals_5, buf9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf12 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf80 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_3[
grid(4)](buf2, buf10, primals_7, buf11, buf12, buf80, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf2, buf10,
primals_7, buf11, buf12, buf14, 16, 4, XBLOCK=4, YBLOCK=8,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf14, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_9
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_5[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17)
buf18 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_add_6[grid(16, 4)](buf2, buf10, primals_7, buf17,
primals_11, buf18, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1,
num_stages=1)
del primals_11
del primals_7
buf19 = buf12
del buf12
buf20 = buf11
del buf11
buf79 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_7[grid(4)
](buf18, buf19, buf20, buf79, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf22 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
del buf17
triton_poi_fused_native_layer_norm_8[grid(16, 4)](buf18, buf19,
buf20, buf22, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1
)
buf23 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_convolution_0[grid(16, 4)](buf22, buf23, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf24)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_2[grid(64)](buf24, primals_13, buf25, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf26)
buf27 = buf20
del buf20
buf28 = buf19
del buf19
buf78 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_3[
grid(4)](buf18, buf26, primals_15, buf27, buf28, buf78, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf18, buf26,
primals_15, buf27, buf28, buf30, 16, 4, XBLOCK=4, YBLOCK=8,
num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_17, reinterpret_tensor(buf30, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf31)
del primals_17
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_5[grid(64)](buf31, buf32, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf32, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf33)
buf34 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_add_6[grid(16, 4)](buf18, buf26, primals_15, buf33,
primals_19, buf34, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1,
num_stages=1)
del primals_15
del primals_19
buf35 = buf28
del buf28
buf36 = buf27
del buf27
buf77 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_7[grid(4)
](buf34, buf35, buf36, buf77, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf38 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0)
del buf33
triton_poi_fused_native_layer_norm_8[grid(16, 4)](buf34, buf35,
buf36, buf38, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1
)
buf39 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
del buf26
triton_poi_fused_convolution_0[grid(16, 4)](buf38, buf39, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf40 = reinterpret_tensor(buf18, (16, 4), (4, 1), 0)
del buf18
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf40)
buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_2[grid(64)](buf40, primals_21, buf41, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf41, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_22, (4, 4), (1, 4), 0), out=buf42)
buf43 = buf36
del buf36
buf44 = buf35
del buf35
buf76 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_3[
grid(4)](buf34, buf42, primals_23, buf43, buf44, buf76, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf34, buf42,
primals_23, buf43, buf44, buf46, 16, 4, XBLOCK=4, YBLOCK=8,
num_warps=1, num_stages=1)
buf47 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_25, reinterpret_tensor(buf46, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_24, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf47)
del primals_25
buf48 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_5[grid(64)](buf47, buf48, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf49 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf48, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf49)
buf50 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_add_6[grid(16, 4)](buf34, buf42, primals_23, buf49,
primals_27, buf50, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1,
num_stages=1)
del primals_23
del primals_27
buf51 = buf44
del buf44
buf52 = buf43
del buf43
buf75 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_7[grid(4)
](buf50, buf51, buf52, buf75, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf54 = reinterpret_tensor(buf49, (4, 4, 4), (16, 4, 1), 0)
del buf49
triton_poi_fused_native_layer_norm_8[grid(16, 4)](buf50, buf51,
buf52, buf54, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1
)
buf55 = reinterpret_tensor(buf42, (4, 4, 4), (16, 4, 1), 0)
del buf42
triton_poi_fused_convolution_0[grid(16, 4)](buf54, buf55, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf56 = reinterpret_tensor(buf34, (16, 4), (4, 1), 0)
del buf34
extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_28, (4, 4), (1, 4), 0), out=buf56)
buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_2[grid(64)](buf56, primals_29, buf57, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf58 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_30, (4, 4), (1, 4), 0), out=buf58)
buf59 = buf52
del buf52
buf60 = buf51
del buf51
buf74 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_3[
grid(4)](buf50, buf58, primals_31, buf59, buf60, buf74, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf62 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf50, buf58,
primals_31, buf59, buf60, buf62, 16, 4, XBLOCK=4, YBLOCK=8,
num_warps=1, num_stages=1)
buf63 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_33, reinterpret_tensor(buf62, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_32, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf63)
del primals_33
buf64 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_5[grid(64)](buf63, buf64, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf65 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf64, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_34, (4, 4), (1, 4), 0), out=buf65)
buf66 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_add_6[grid(16, 4)](buf50, buf58, primals_31, buf65,
primals_35, buf66, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1,
num_stages=1)
del buf50
del buf58
del primals_31
del primals_35
buf67 = buf60
del buf60
buf68 = buf59
del buf59
buf73 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_7[grid(4)
](buf66, buf67, buf68, buf73, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf70 = reinterpret_tensor(buf65, (4, 4, 4), (16, 4, 1), 0)
del buf65
triton_poi_fused_native_layer_norm_8[grid(16, 4)](buf66, buf67,
buf68, buf70, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1
)
del buf66
del buf67
del buf68
buf71 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mean_9[grid(16)](buf70, buf71, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf72 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_37, buf71, reinterpret_tensor(
primals_36, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf72)
del primals_37
return (buf72, primals_2, primals_5, primals_13, primals_21, primals_29,
reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf2, buf3,
buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8,
reinterpret_tensor(buf9, (16, 4), (4, 1), 0), buf14, buf15,
reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf22,
reinterpret_tensor(buf23, (16, 4), (4, 1), 0), buf24,
reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf30, buf31,
reinterpret_tensor(buf32, (16, 4), (4, 1), 0), buf38,
reinterpret_tensor(buf39, (16, 4), (4, 1), 0), buf40,
reinterpret_tensor(buf41, (16, 4), (4, 1), 0), buf46, buf47,
reinterpret_tensor(buf48, (16, 4), (4, 1), 0), buf54,
reinterpret_tensor(buf55, (16, 4), (4, 1), 0), buf56,
reinterpret_tensor(buf57, (16, 4), (4, 1), 0), buf62, buf63,
reinterpret_tensor(buf64, (16, 4), (4, 1), 0), buf70, buf71,
primals_36, buf73, primals_34, primals_32, buf74, primals_30,
primals_28, buf75, primals_26, primals_24, buf76, primals_22,
primals_20, buf77, primals_18, primals_16, buf78, primals_14,
primals_12, buf79, primals_10, primals_8, buf80, primals_6, primals_4)
class MlpBlock(nn.Module):
def __init__(self, features, hidden_dim):
super().__init__()
self.hidden_dim = hidden_dim
self.features = features
self.fc1 = nn.Linear(self.features, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, int(self.features))
def forward(self, x):
y = self.fc1(x)
y = F.gelu(y)
return self.fc2(y)
class MixerBlock(nn.Module):
"""Mixer block layer"""
def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim,
channels_mlp_dim):
super().__init__()
self.tokens_mlp_dim = tokens_mlp_dim
self.tokens_dim = tokens_dim
self.channels_mlp_dim = channels_mlp_dim
self.channels_dim = channels_dim
self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim)
self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim)
def forward(self, x):
y = F.layer_norm(x, x.shape[1:])
y = torch.transpose(y, 1, 2)
y = self.token_mixer(y)
y = torch.transpose(y, 1, 2)
x = x + y
y = F.layer_norm(x, x.shape[1:])
return x + self.channel_mixer(y)
class MlpMixerNew(nn.Module):
"""Mixer architecture"""
def __init__(self, patches, feature_length, num_classes, num_blocks,
hidden_dim, tokens_mlp_dim, channels_mlp_dim):
super().__init__()
self.patches = patches
self.feature_length = feature_length
self.num_classes = num_classes
self.num_blocks = num_blocks
self.hidden_dim = hidden_dim
self.tokens_mlp_dim = tokens_mlp_dim
self.channels_mlp_dim = channels_mlp_dim
        self.conv = nn.Conv1d(in_channels=self.patches,
                              out_channels=self.hidden_dim, kernel_size=1)
        for nb in range(self.num_blocks):
            setattr(self, 'mixerBlock_{}'.format(nb),
                    MixerBlock(self.feature_length, self.tokens_mlp_dim,
                               self.hidden_dim, self.channels_mlp_dim))
        self.fc = nn.Linear(in_features=self.feature_length,
                            out_features=self.num_classes)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.mixerBlock_0.token_mixer.fc1.weight
primals_5 = self.mixerBlock_0.token_mixer.fc1.bias
primals_6 = self.mixerBlock_0.token_mixer.fc2.weight
primals_7 = self.mixerBlock_0.token_mixer.fc2.bias
primals_8 = self.mixerBlock_0.channel_mixer.fc1.weight
primals_9 = self.mixerBlock_0.channel_mixer.fc1.bias
primals_10 = self.mixerBlock_0.channel_mixer.fc2.weight
primals_11 = self.mixerBlock_0.channel_mixer.fc2.bias
primals_12 = self.mixerBlock_1.token_mixer.fc1.weight
primals_13 = self.mixerBlock_1.token_mixer.fc1.bias
primals_14 = self.mixerBlock_1.token_mixer.fc2.weight
primals_15 = self.mixerBlock_1.token_mixer.fc2.bias
primals_16 = self.mixerBlock_1.channel_mixer.fc1.weight
primals_17 = self.mixerBlock_1.channel_mixer.fc1.bias
primals_18 = self.mixerBlock_1.channel_mixer.fc2.weight
primals_19 = self.mixerBlock_1.channel_mixer.fc2.bias
primals_20 = self.mixerBlock_2.token_mixer.fc1.weight
primals_21 = self.mixerBlock_2.token_mixer.fc1.bias
primals_22 = self.mixerBlock_2.token_mixer.fc2.weight
primals_23 = self.mixerBlock_2.token_mixer.fc2.bias
primals_24 = self.mixerBlock_2.channel_mixer.fc1.weight
primals_25 = self.mixerBlock_2.channel_mixer.fc1.bias
primals_26 = self.mixerBlock_2.channel_mixer.fc2.weight
primals_27 = self.mixerBlock_2.channel_mixer.fc2.bias
primals_28 = self.mixerBlock_3.token_mixer.fc1.weight
primals_29 = self.mixerBlock_3.token_mixer.fc1.bias
primals_30 = self.mixerBlock_3.token_mixer.fc2.weight
primals_31 = self.mixerBlock_3.token_mixer.fc2.bias
primals_32 = self.mixerBlock_3.channel_mixer.fc1.weight
primals_33 = self.mixerBlock_3.channel_mixer.fc1.bias
primals_34 = self.mixerBlock_3.channel_mixer.fc2.weight
primals_35 = self.mixerBlock_3.channel_mixer.fc2.bias
primals_36 = self.fc.weight
primals_37 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37])
return output[0]
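# A minimal usage sketch for the compiled wrapper (requires CUDA; the
# kernels above target device 0, and the wrapper assumes num_blocks=4
# since it reads mixerBlock_0 through mixerBlock_3). The helper name is
# illustrative, not part of the original harness.
def _run_compiled_mixer():
    model = MlpMixerNew(patches=4, feature_length=4, num_classes=4,
                        num_blocks=4, hidden_dim=4, tokens_mlp_dim=4,
                        channels_mlp_dim=4).cuda()
    out = model(torch.rand(4, 4, 4, device='cuda'))
    assert out.shape == (4, 4)
    return out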
|
amayuelas/NNKGReasoning
|
MlpMixer
| false
| 6,221
|
[
"MIT"
] | 1
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
|
Building_Block
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
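# A quick self-check for the helper above: with no mask and no dropout it
# matches PyTorch's fused scaled dot-product attention (the compiled code
# for this entry calls that op directly with scale=0.1, i.e. 1/sqrt(d_k)
# for d_k=100). The helper name below is illustrative only.
def _attention_matches_sdpa():
    q, k, v = (torch.randn(2, 4, 8, 16) for _ in range(3))
    ref = F.scaled_dot_product_attention(q, k, v)
    out = attention(q, k, v, d_k=16)
    assert torch.allclose(out, ref, atol=1e-5)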
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (
            x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
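# Note: Norm is close to nn.LayerNorm but not identical. A minimal sketch
# of the two differences, assuming the default eps: it uses the unbiased
# (Bessel-corrected) std, and it adds eps to the std itself rather than to
# the variance inside the square root. The helper name is illustrative.
def _norm_differs_from_layernorm():
    x = torch.randn(3, 8)
    norm = Norm(8)
    manual = norm.alpha * (x - x.mean(dim=-1, keepdim=True)) / (
        x.std(dim=-1, keepdim=True) + norm.eps) + norm.bias
    assert torch.allclose(norm(x), manual)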
class Building_Block(nn.Module):
def __init__(self, input_features, hidden_features, num_heads, dropout):
super(Building_Block, self).__init__()
self.input_features = input_features
self.hidden_features = hidden_features
self.embed_dim = 100 * self.hidden_features
self.attn = MultiHeadAttention(num_heads, self.embed_dim, dropout)
self.pos_ffn = FeedForward(self.embed_dim, dropout=dropout)
self.norm_1 = Norm(self.embed_dim)
self.norm_2 = Norm(self.embed_dim)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
x2 = self.norm_1(x)
new_x = x + self.dropout_1(self.attn(x2, x2, x2))
x2 = self.norm_2(new_x)
new_x = new_x + self.dropout_2(self.pos_ffn(x2))
return new_x
def get_inputs():
return [torch.rand([4, 400])]
def get_init_inputs():
return [[], {'input_features': 4, 'hidden_features': 4, 'num_heads': 4,
'dropout': 0.5}]
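# A minimal smoke test, assuming eval mode so the 0.5 dropout is inactive.
# Note the residual add broadcasts the (4, 1, 400) attention output against
# the (4, 400) input, so the block returns (4, 4, 400); the compiled code
# for this entry materialises the same (4, 4, 400) buffer.
def _building_block_smoke_test():
    block = Building_Block(input_features=4, hidden_features=4,
                           num_heads=4, dropout=0.5).eval()
    out = block(torch.rand(4, 400))
    assert out.shape == (4, 4, 400)
    return out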
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 400
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 400 * x0), rmask, other=0.0)
tmp26 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last',
other=0.0)
tmp30 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last',
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.where(rmask, tmp1, 0)
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp1, [RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp10 = tl.full([1], 400, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = tl.where(rmask, tmp15, 0)
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp19 = 400.0
tmp20 = tmp4 / tmp19
tmp21 = 399.0
tmp22 = tmp18 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-06
tmp25 = tmp23 + tmp24
tmp27 = tmp0 - tmp20
tmp28 = tmp26 * tmp27
tmp29 = tmp28 / tmp25
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, None)
tl.store(out_ptr0 + (r1 + 400 * x0), tmp31, rmask)
@triton.jit
def triton_red_fused_add_div_mean_mul_std_sub_1(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 16
rnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4
x1 = xindex // 4
_tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
tmp6_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 400 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r2 + 400 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = _tmp4 + tmp3
_tmp4 = tl.where(rmask & xmask, tmp5, _tmp4)
tmp6_mean_next, tmp6_m2_next, tmp6_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp6_mean, tmp6_m2, tmp6_weight, roffset == 0)
)
tmp6_mean = tl.where(rmask & xmask, tmp6_mean_next, tmp6_mean)
tmp6_m2 = tl.where(rmask & xmask, tmp6_m2_next, tmp6_m2)
tmp6_weight = tl.where(rmask & xmask, tmp6_weight_next, tmp6_weight)
tmp4 = tl.sum(_tmp4, 1)[:, None]
tmp6_tmp, tmp7_tmp, tmp8_tmp = triton_helpers.welford(tmp6_mean,
tmp6_m2, tmp6_weight, 1)
tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tmp8_tmp[:, None]
tmp9 = 400.0
tmp10 = tmp4 / tmp9
tmp11 = 399.0
tmp12 = tmp7 / tmp11
tmp13 = libdevice.sqrt(tmp12)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp10, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp13, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp14 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp15 = tl.load(in_ptr0 + (r2 + 400 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (r2 + 400 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = tmp17 - tmp10
tmp19 = tmp14 * tmp18
tmp20 = 1e-06
tmp21 = tmp13 + tmp20
tmp22 = tmp19 / tmp21
tmp24 = tmp22 + tmp23
tl.store(out_ptr0 + (r2 + 400 * x3), tmp24, rmask & xmask)
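# The reduction above accumulates mean and M2 with Welford's single-pass
# updates (triton_helpers.welford_reduce / welford), which avoid the
# catastrophic cancellation of the naive sum-of-squares approach. A minimal
# scalar sketch of the same update rule; note the kernel divides the
# resulting M2 by 399 (N - 1) to match torch.std's unbiased estimate.
def _welford(values):
    mean, m2, n = 0.0, 0.0, 0
    for v in values:
        n += 1
        delta = v - mean
        mean += delta / n
        m2 += delta * (v - mean)  # second factor uses the updated mean
    return mean, m2  # m2 / (n - 1) gives the unbiased variance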
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 1600
x0 = xindex % 400
x2 = xindex // 1600
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 400 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_out_ptr0 + x4, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (400,), (1,))
assert_size_stride(primals_2, (4, 400), (400, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (400, 400), (400, 1))
assert_size_stride(primals_5, (400,), (1,))
assert_size_stride(primals_6, (400, 400), (400, 1))
assert_size_stride(primals_7, (400,), (1,))
assert_size_stride(primals_8, (400, 400), (400, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (400, 400), (400, 1))
assert_size_stride(primals_11, (400,), (1,))
assert_size_stride(primals_12, (400,), (1,))
assert_size_stride(primals_13, (400,), (1,))
assert_size_stride(primals_14, (2048, 400), (400, 1))
assert_size_stride(primals_15, (2048,), (1,))
assert_size_stride(primals_16, (400, 2048), (2048, 1))
assert_size_stride(primals_17, (400,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5,
primals_2, primals_1, primals_3, buf6, 4, 400, num_warps=4,
num_stages=1)
del primals_1
del primals_3
buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.addmm(primals_5, buf6, reinterpret_tensor(primals_4,
(400, 400), (1, 400), 0), alpha=1, beta=1, out=buf7)
del primals_5
buf8 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6,
(400, 400), (1, 400), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8,
(400, 400), (1, 400), 0), alpha=1, beta=1, out=buf9)
del primals_9
buf10 = torch.ops.aten._scaled_dot_product_efficient_attention.default(
reinterpret_tensor(buf8, (4, 4, 1, 100), (400, 100, 400, 1), 0),
reinterpret_tensor(buf7, (4, 4, 1, 100), (400, 100, 400, 1), 0),
reinterpret_tensor(buf9, (4, 4, 1, 100), (400, 100, 400, 1), 0),
None, True, scale=0.1)
buf11 = buf10[0]
buf12 = buf10[1]
buf13 = buf10[2]
buf14 = buf10[3]
del buf10
buf15 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 400),
(400, 1), 0), reinterpret_tensor(primals_10, (400, 400), (1,
400), 0), alpha=1, beta=1, out=buf15)
del primals_11
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 1), 0)
del buf16
buf21 = reinterpret_tensor(buf19, (4, 4, 1), (4, 1, 1), 0)
del buf19
buf22 = empty_strided_cuda((4, 4, 400), (1600, 400, 1), torch.float32)
triton_red_fused_add_div_mean_mul_std_sub_1[grid(16)](buf17, buf21,
primals_2, buf15, primals_12, primals_13, buf22, 16, 400,
XBLOCK=1, RBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf23 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf22, (16, 400), (400, 1), 0),
reinterpret_tensor(primals_14, (400, 2048), (1, 400), 0), out=buf23
)
buf24 = reinterpret_tensor(buf23, (4, 4, 2048), (8192, 2048, 1), 0)
del buf23
buf27 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(32768)](buf24,
primals_15, buf27, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf25 = empty_strided_cuda((16, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_16, (2048, 400), (1, 2048), 0),
out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 400), (1600, 400, 1), 0)
del buf25
triton_poi_fused_add_3[grid(6400)](buf26, primals_2, buf15,
primals_17, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
return (buf26, primals_2, primals_12, buf1, buf5, buf6,
reinterpret_tensor(buf7, (4, 4, 1, 100), (400, 100, 400, 1), 0),
reinterpret_tensor(buf8, (4, 4, 1, 100), (400, 100, 400, 1), 0),
reinterpret_tensor(buf9, (4, 4, 1, 100), (400, 100, 400, 1), 0),
buf11, buf12, buf13, buf14, buf15, buf17, buf21, reinterpret_tensor
(buf22, (16, 400), (400, 1), 0), reinterpret_tensor(buf24, (16,
2048), (2048, 1), 0), primals_16, buf27, primals_14, primals_10,
primals_8, primals_6, primals_4)
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (
            x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class Building_BlockNew(nn.Module):
def __init__(self, input_features, hidden_features, num_heads, dropout):
super(Building_BlockNew, self).__init__()
self.input_features = input_features
self.hidden_features = hidden_features
self.embed_dim = 100 * self.hidden_features
self.attn = MultiHeadAttention(num_heads, self.embed_dim, dropout)
self.pos_ffn = FeedForward(self.embed_dim, dropout=dropout)
self.norm_1 = Norm(self.embed_dim)
self.norm_2 = Norm(self.embed_dim)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
primals_4 = self.attn.q_linear.weight
primals_1 = self.attn.q_linear.bias
primals_6 = self.attn.v_linear.weight
primals_3 = self.attn.v_linear.bias
primals_8 = self.attn.k_linear.weight
primals_5 = self.attn.k_linear.bias
primals_10 = self.attn.out.weight
primals_7 = self.attn.out.bias
primals_14 = self.pos_ffn.linear_1.weight
primals_15 = self.pos_ffn.linear_1.bias
primals_16 = self.pos_ffn.linear_2.weight
primals_9 = self.pos_ffn.linear_2.bias
primals_11 = self.norm_1.alpha
primals_12 = self.norm_1.bias
primals_13 = self.norm_2.alpha
primals_17 = self.norm_2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
|
and-smith/Vac-Scholar-Curb-GAN
|
Building_Block
| false
| 6,222
|
[
"MIT"
] | 1
|
142bd70fdf0f1cbc4a1c20c5e58fa5b6a9dbe742
|
https://github.com/and-smith/Vac-Scholar-Curb-GAN/tree/142bd70fdf0f1cbc4a1c20c5e58fa5b6a9dbe742
|
LossAttentionLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class LossAttentionLayer(nn.Module):
def __init__(self):
super(LossAttentionLayer, self).__init__()
def forward(self, features, W_1, b_1):
out_c = F.linear(features, W_1, b_1)
out = out_c - out_c.max()
out = out.exp()
out = out.sum(1, keepdim=True)
alpha = out / out.sum(0)
alpha01 = features.size(0) * alpha.expand_as(features)
context = torch.mul(features, alpha01)
return context, out_c, torch.squeeze(alpha)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
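# Hedged usage sketch (not part of the original module; shapes mirror
# get_inputs above). alpha weights each row of `features` by its share of
# the total exponentiated linear response.
if __name__ == '__main__':
    layer = LossAttentionLayer()
    features, W_1, b_1 = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
    context, out_c, alpha = layer(features, W_1, b_1)
    print(context.shape, out_c.shape, alpha.shape)  # (4, 4) (4, 4) (4,)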
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_per_fused_div_exp_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp5 - tmp2
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tmp9 - tmp2
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tmp13 - tmp2
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp12 + tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp16 / tmp19
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp16, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp20, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 / tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tmp0 * tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(arg0_1, arg2_1, reinterpret_tensor(arg1_1, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_max_0[grid(1)](buf0, buf1, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((1,), (1,), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_per_fused_div_exp_sub_sum_1[grid(1)](buf0, buf1, buf2, buf3,
buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16)](arg2_1, buf2, buf3, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg2_1
del buf2
del buf3
return buf4, buf0, reinterpret_tensor(buf5, (4,), (1,), 0)
class LossAttentionLayerNew(nn.Module):
def __init__(self):
super(LossAttentionLayerNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2]
|
apardyl/ProtoPNet
|
LossAttentionLayer
| false
| 6,223
|
[
"MIT"
] | 1
|
b2bbd7284bfc84a37385c0e975408c68cdf64205
|
https://github.com/apardyl/ProtoPNet/tree/b2bbd7284bfc84a37385c0e975408c68cdf64205
|
KLLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
def kl_loss(x, y):
x = F.softmax(x.detach(), dim=1)
y = F.log_softmax(y, dim=1)
return torch.mean(torch.sum(x * (torch.log(x) - y), dim=1))
class KLLoss(nn.Module):
def forward(self, x, y):
return kl_loss(x, y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
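# Hedged usage sketch (shapes mirror get_inputs above). Note that x is
# detached inside kl_loss, so gradients flow only through y.
if __name__ == '__main__':
    x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss = KLLoss()(x, y)
    print(loss)  # scalar: KL(softmax(x) || softmax(y)) over dim 1, averaged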
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_log_mean_mul_sub_sum_3(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp23 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp29 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp1 - tmp14
tmp16 = tmp0 * tmp15
tmp18 = tl_math.log(tmp17)
tmp19 = tmp4 - tmp13
tmp20 = tmp18 - tmp19
tmp21 = tmp17 * tmp20
tmp22 = tmp16 + tmp21
tmp24 = tl_math.log(tmp23)
tmp25 = tmp7 - tmp13
tmp26 = tmp24 - tmp25
tmp27 = tmp23 * tmp26
tmp28 = tmp22 + tmp27
tmp30 = tl_math.log(tmp29)
tmp31 = tmp10 - tmp13
tmp32 = tmp30 - tmp31
tmp33 = tmp29 * tmp32
tmp34 = tmp28 + tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = tl.sum(tmp35, 1)[:, None]
tmp38 = 64.0
tmp39 = tmp37 / tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__log_softmax_2[grid(256)](arg1_1, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused__log_softmax_log_mean_mul_sub_sum_3[grid(1)](buf5,
buf1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
del buf2
return buf5,
def kl_loss(x, y):
x = F.softmax(x.detach(), dim=1)
y = F.log_softmax(y, dim=1)
return torch.mean(torch.sum(x * (torch.log(x) - y), dim=1))
class KLLossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
anurag1paul/pseudo_lidar
|
KLLoss
| false
| 6,224
|
[
"MIT"
] | 1
|
02faf327efd43c986629d0ea797b058e464c05aa
|
https://github.com/anurag1paul/pseudo_lidar/tree/02faf327efd43c986629d0ea797b058e464c05aa
|
Time2Vec
|
import torch
from torch import nn
class Time2Vec(nn.Module):
"""Encode time information
    phi and omega have k + 1 elements per time step,
    so an input of shape (batch_size, sample_size) becomes
    an output of shape (batch_size, sample_size, embed_size)
Reference
* https://arxiv.org/abs/1907.05321
* https://github.com/ojus1/Time2Vec-PyTorch
"""
def __init__(self, input_size, embed_size):
super().__init__()
self.input_size = input_size
self.embed_size = embed_size
self.lin = nn.Linear(self.input_size, 1)
self.nonlin = nn.Linear(self.input_size, self.embed_size - 1)
self.F = torch.sin
def forward(self, x):
"""Compute following equation
t2v(t)[i] = omega[i] * x[t] + phi[i] if i == 0
t2v(t)[i] = f(omega[i] * x[t] + phi[i]) if 1 <= i <= k
        so this just applies a Linear layer twice
x: (batch_size, feature_size, sample_size)
v1: (batch_size, feature_size, 1)
v2: (batch_size, feature_size, embed_size-1)
"""
_ = x.size(0)
v1 = self.lin(x)
v2 = self.F(self.nonlin(x))
return torch.cat([v1, v2], dim=2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'embed_size': 4}]
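# Hedged usage sketch (mirrors get_init_inputs/get_inputs above): one plain
# linear feature plus embed_size - 1 sine features, concatenated on dim 2.
if __name__ == '__main__':
    t2v = Time2Vec(input_size=4, embed_size=4)
    out = t2v(torch.rand(4, 4, 4))
    print(out.shape)  # torch.Size([4, 4, 4])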
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (3 * x1 + (-1 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl_math.sin(tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp6, tmp10, tmp11)
tmp13 = tl.where(tmp4, tmp5, tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (3, 4), (4, 1))
assert_size_stride(primals_5, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 3), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf2
class Time2VecNew(nn.Module):
"""Encode time information
    phi and omega have k + 1 elements per time step,
    so an input of shape (batch_size, sample_size) becomes
    an output of shape (batch_size, sample_size, embed_size)
Reference
* https://arxiv.org/abs/1907.05321
* https://github.com/ojus1/Time2Vec-PyTorch
"""
def __init__(self, input_size, embed_size):
super().__init__()
self.input_size = input_size
self.embed_size = embed_size
self.lin = nn.Linear(self.input_size, 1)
self.nonlin = nn.Linear(self.input_size, self.embed_size - 1)
self.F = torch.sin
def forward(self, input_0):
primals_2 = self.lin.weight
primals_3 = self.lin.bias
primals_4 = self.nonlin.weight
primals_5 = self.nonlin.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
appleparan/mise.py
|
Time2Vec
| false
| 6,225
|
[
"MIT"
] | 1
|
a77ea51be37a739928600c66d168d69b78bc0c4b
|
https://github.com/appleparan/mise.py/tree/a77ea51be37a739928600c66d168d69b78bc0c4b
|
MatrixLayer
|
import torch
import torch.nn as nn
class ActionPool(nn.Module):
"""
Basic pooling operations.
"""
def __init__(self, axis, function='mean', expand=True):
super(ActionPool, self).__init__()
self.expand = expand
self._function_name = function
self._axis_name = axis
if isinstance(axis, str):
self.dim = {'row': 0, 'column': 1, 'both': None}[axis]
else:
self.dim = axis
if function == 'max':
self.function = lambda x, dim: torch.max(x, dim=dim)[0]
elif function == 'sum':
self.function = lambda x, dim: torch.sum(x, dim=dim)
elif function == 'mean':
self.function = torch.mean
else:
raise ValueError('Unrecognised function: %s' % function)
def forward(self, input):
if self.dim is None:
n, p, i, j = input.size()
input = input.contiguous()
reshaped = input.view(n, p, i * j)
output = self.function(reshaped, dim=2)
if self.expand:
output = output.unsqueeze(2).unsqueeze(2)
else:
output = self.function(input, dim=self.dim + 2)
if self.expand:
output = output.unsqueeze(self.dim + 2)
if self.expand:
return output.expand_as(input)
else:
return output
class MatrixLinear(nn.Linear):
"""
Matrix-based linear feed-forward layer. Think of it as the
matrix analog to a feed-forward layer in an MLP.
"""
def forward(self, input):
n, p, i, j = input.size()
        # permute returns a new tensor; the original code discarded its result,
        # so apply it before flattening to (n * i * j, p)
        state = input.permute(0, 2, 3, 1).contiguous().view((n * i * j, p))
output = super(MatrixLinear, self).forward(state)
output = output.view((n, i, j, self.out_features)).permute(0, 3, 1, 2)
return output
class MatrixLayer(nn.Module):
"""
Set layers are linear layers with pooling. Pooling operations
are defined above.
"""
def __init__(self, in_features, out_features, pooling='max', axes=[
'row', 'column', 'both'], name='', debug=False, dropout=0.0):
super(MatrixLayer, self).__init__()
pool = []
for axis in axes:
pool.append(ActionPool(axis, pooling, expand=True))
self.pool = pool
self.dropout = dropout
self.linear = MatrixLinear(in_features * (1 + len(pool)), out_features)
if dropout > 0.0:
self.dropoutlayer = nn.Dropout2d(dropout)
self.name = name
self.debug = debug
def forward(self, input):
pooled = [p(input) for p in self.pool]
state = torch.cat([input] + pooled, dim=1)
if self.dropout > 0.0:
state = self.dropoutlayer(state)
        if self.debug:
            pass  # hook point; the original had a bare `None` here (likely a stripped print)
return self.linear(state)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
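# Hedged usage sketch (defaults: pooling='max' over rows, columns, and both,
# so the linear layer sees in_features * 4 channels).
if __name__ == '__main__':
    layer = MatrixLayer(in_features=4, out_features=4)
    y = layer(torch.rand(4, 4, 4, 4))
    print(y.shape)  # torch.Size([4, 4, 4, 4])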
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_cat_max_0(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + (r1 + 16 * x2 + 256 * x3), tmp0, xmask)
tl.store(out_ptr2 + (r1 + 16 * x2 + 256 * x3), tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex // 16
x3 = xindex // 64
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x4), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x4), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x4), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x4), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x5 + 256 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x2 = xindex // 64
x4 = xindex % 64
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x4 + 256 * x2), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32
)
buf2 = reinterpret_tensor(buf6, (4, 4, 4, 4), (256, 16, 4, 1), 0)
buf5 = reinterpret_tensor(buf6, (4, 4, 4, 4), (256, 16, 4, 1), 192)
get_raw_stream(0)
triton_per_fused_cat_max_0[grid(16)](primals_1, buf2, buf5, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf3 = reinterpret_tensor(buf6, (4, 4, 4, 4), (256, 16, 4, 1), 64)
triton_poi_fused_cat_1[grid(256)](primals_1, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf6, (4, 4, 4, 4), (256, 16, 4, 1), 128)
triton_poi_fused_cat_2[grid(256)](primals_1, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf6, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf7)
del primals_2
del primals_3
return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 1, 16, 4), 0
), reinterpret_tensor(buf6, (64, 16), (16, 1), 0)
class ActionPool(nn.Module):
"""
Basic pooling operations.
"""
def __init__(self, axis, function='mean', expand=True):
super(ActionPool, self).__init__()
self.expand = expand
self._function_name = function
self._axis_name = axis
if isinstance(axis, str):
self.dim = {'row': 0, 'column': 1, 'both': None}[axis]
else:
self.dim = axis
if function == 'max':
self.function = lambda x, dim: torch.max(x, dim=dim)[0]
elif function == 'sum':
self.function = lambda x, dim: torch.sum(x, dim=dim)
elif function == 'mean':
self.function = torch.mean
else:
raise ValueError('Unrecognised function: %s' % function)
def forward(self, input):
if self.dim is None:
n, p, i, j = input.size()
input = input.contiguous()
reshaped = input.view(n, p, i * j)
output = self.function(reshaped, dim=2)
if self.expand:
output = output.unsqueeze(2).unsqueeze(2)
else:
output = self.function(input, dim=self.dim + 2)
if self.expand:
output = output.unsqueeze(self.dim + 2)
if self.expand:
return output.expand_as(input)
else:
return output
class MatrixLinear(nn.Linear):
"""
Matrix-based linear feed-forward layer. Think of it as the
matrix analog to a feed-forward layer in an MLP.
"""
def forward(self, input):
n, p, i, j = input.size()
        # permute returns a new tensor; the original code discarded its result,
        # so apply it before flattening to (n * i * j, p)
        state = input.permute(0, 2, 3, 1).contiguous().view((n * i * j, p))
output = super(MatrixLinear, self).forward(state)
output = output.view((n, i, j, self.out_features)).permute(0, 3, 1, 2)
return output
class MatrixLayerNew(nn.Module):
"""
Set layers are linear layers with pooling. Pooling operations
are defined above.
"""
def __init__(self, in_features, out_features, pooling='max', axes=[
'row', 'column', 'both'], name='', debug=False, dropout=0.0):
super(MatrixLayerNew, self).__init__()
pool = []
for axis in axes:
pool.append(ActionPool(axis, pooling, expand=True))
self.pool = pool
self.dropout = dropout
self.linear = MatrixLinear(in_features * (1 + len(pool)), out_features)
if dropout > 0.0:
self.dropoutlayer = nn.Dropout2d(dropout)
self.name = name
self.debug = debug
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
anuar12/deep_game_theory
|
MatrixLayer
| false
| 6,226
|
[
"MIT"
] | 1
|
1debe5a498fe5f017f2791965a5e529b0dfb0529
|
https://github.com/anuar12/deep_game_theory/tree/1debe5a498fe5f017f2791965a5e529b0dfb0529
|
TVLoss
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class TVLoss(nn.Module):
def forward(self, input):
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
diff = x_diff ** 2 + y_diff ** 2 + 1e-08
return diff.mean(dim=1).sqrt().mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
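# Hedged usage sketch: replicate-pad one pixel right/bottom, square the x/y
# forward differences, mean over the channel dim, take sqrt, then mean to a
# scalar total-variation penalty.
if __name__ == '__main__':
    loss = TVLoss()(torch.rand(4, 4, 4, 4))
    print(loss)  # 0-dim tensor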
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4 % 4
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 64 * r2 +
(3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 64 * r2 +
(3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp4 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 + r1 <
3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp10 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp11 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp14 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp20 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp21 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp24 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp30 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp31 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp34 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp15 = tmp14 - tmp11
tmp16 = tmp15 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp17 + tmp8
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp25 = tmp24 - tmp21
tmp26 = tmp25 * tmp25
tmp27 = tmp23 + tmp26
tmp28 = tmp27 + tmp8
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = tmp37 + tmp8
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tmp42 = libdevice.sqrt(tmp41)
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = tl.sum(tmp43, 1)[:, None]
tmp46 = 64.0
tmp47 = tmp45 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0[grid(1)](buf2, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
aradalienzzzz/vqgan-clip-app
|
TVLoss
| false
| 6,227
|
[
"MIT"
] | 1
|
f5a16d792da5ad0ede855254fe393f6b990c8e1d
|
https://github.com/aradalienzzzz/vqgan-clip-app/tree/f5a16d792da5ad0ede855254fe393f6b990c8e1d
|
StdConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
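# Hedged usage sketch: weight standardization zero-means and unit-scales the
# kernel per output channel on every forward, before a plain conv2d.
if __name__ == '__main__':
    conv = StdConv2d(in_channels=4, out_channels=4, kernel_size=4)
    y = conv(torch.rand(4, 4, 4, 4))
    print(y.shape)  # torch.Size([4, 4, 1, 1]): 4x4 kernel, no padding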
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3,
primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, primals_1, primals_3, buf3, buf4
class StdConv2dNew(nn.Conv2d):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
aris-mukherjee/TransUNet-modified
|
StdConv2d
| false
| 6,228
|
[
"Apache-2.0"
] | 1
|
185307b677fd6ee05604213c90e14e028fab476a
|
https://github.com/aris-mukherjee/TransUNet-modified/tree/185307b677fd6ee05604213c90e14e028fab476a
|
LearnedPositionalEmbedding1D
|
import torch
from torch import nn
import torch.nn
import torch.autograd
class LearnedPositionalEmbedding1D(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs."""
def __init__(self, seq_len, dim):
super().__init__()
self.pos_embedding = nn.Parameter(torch.zeros(1, seq_len, dim))
def forward(self, x):
"""Input has shape `(batch_size, seq_len, emb_dim)`"""
return x + self.pos_embedding
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'seq_len': 4, 'dim': 4}]
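# Hedged usage sketch (shape follows the docstring): the (1, seq_len, dim)
# parameter broadcasts over the batch dimension.
if __name__ == '__main__':
    pe = LearnedPositionalEmbedding1D(seq_len=4, dim=4)
    y = pe(torch.rand(4, 4, 4))
    print(y.shape)  # torch.Size([4, 4, 4])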
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnedPositionalEmbedding1DNew(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs."""
def __init__(self, seq_len, dim):
super().__init__()
self.pos_embedding = nn.Parameter(torch.zeros(1, seq_len, dim))
def forward(self, input_0):
primals_1 = self.pos_embedding
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
arkel23/yuwu
|
LearnedPositionalEmbedding1D
| false
| 6,229
|
[
"MIT"
] | 1
|
4dcf0e18693e09a947569ddcc7cb3ff00c7c674a
|
https://github.com/arkel23/yuwu/tree/4dcf0e18693e09a947569ddcc7cb3ff00c7c674a
|
BasicBlock
|
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x, adj, D=None):
if x.dim() == 3:
xw = torch.matmul(x, self.weight)
output = torch.bmm(adj, xw)
elif x.dim() == 2:
xw = torch.mm(x, self.weight)
output = torch.spmm(adj, xw)
if D is not None:
output = output * 1.0 / D
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, dropout=0.0):
super(BasicBlock, self).__init__()
self.gc = GraphConv(inplanes, planes)
self.relu = nn.ReLU(inplace=True)
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
else:
self.dropout = None
def forward(self, x, adj, D=None):
x = self.gc(x, adj, D)
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
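# Hedged usage sketch (mirrors get_inputs above): for 2-d input this computes
# relu(spmm(adj, x @ W)).
if __name__ == '__main__':
    block = BasicBlock(inplanes=4, planes=4)
    out = block(torch.rand(4, 4), torch.rand(4, 4))
    print(out.shape)  # torch.Size([4, 4])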
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x, adj, D=None):
if x.dim() == 3:
xw = torch.matmul(x, self.weight)
output = torch.bmm(adj, xw)
elif x.dim() == 2:
xw = torch.mm(x, self.weight)
output = torch.spmm(adj, xw)
if D is not None:
output = output * 1.0 / D
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class BasicBlockNew(nn.Module):
def __init__(self, inplanes, planes, dropout=0.0):
super(BasicBlockNew, self).__init__()
self.gc = GraphConv(inplanes, planes)
self.relu = nn.ReLU(inplace=True)
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
else:
self.dropout = None
def forward(self, input_0, input_1):
primals_1 = self.gc.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ardihikaru/learn-to-cluster
|
BasicBlock
| false
| 6,230
|
[
"MIT"
] | 1
|
d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca
|
https://github.com/ardihikaru/learn-to-cluster/tree/d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca
|
BasicBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
assert ndim in [3, 4, 5
], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNd, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
class FilterResponseNorm2d(FilterResponseNormNd):
def __init__(self, num_features, eps=1e-06, learnable_eps=False):
super(FilterResponseNorm2d, self).__init__(4, num_features, eps=eps,
learnable_eps=learnable_eps)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.bn1 = FilterResponseNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = FilterResponseNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), FilterResponseNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
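# Hedged usage sketch: each conv is followed by filter response normalization
# and the thresholded activation max(gamma * x + beta, tau); with stride 1 and
# in_planes == planes the shortcut is the identity.
if __name__ == '__main__':
    block = BasicBlock(in_planes=4, planes=4)
    y = block(torch.rand(4, 4, 4, 4))
    print(y.shape)  # torch.Size([4, 4, 4, 4])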
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tl.full([1, 1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp20, xmask)
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = tmp18 + tmp19
tmp21 = tl.full([1, 1], 0, tl.int32)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp23 = 0.0
tmp24 = tmp22 <= tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp22, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp24, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0[grid(16)](
buf2, buf0, primals_3, primals_4, primals_5, primals_6, buf3,
16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1[
grid(16)](buf6, buf4, primals_8, primals_9, primals_10,
primals_11, primals_2, buf7, buf8, 16, 16, XBLOCK=8, num_warps=
2, num_stages=1)
del primals_8
return (buf7, primals_1, primals_2, primals_4, primals_5, primals_6,
primals_7, primals_9, primals_10, primals_11, buf0, buf2, buf3,
buf4, buf6, buf8)
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
assert ndim in [3, 4, 5
], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNd, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
class FilterResponseNorm2d(FilterResponseNormNd):
def __init__(self, num_features, eps=1e-06, learnable_eps=False):
super(FilterResponseNorm2d, self).__init__(4, num_features, eps=eps,
learnable_eps=learnable_eps)
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.bn1 = FilterResponseNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = FilterResponseNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), FilterResponseNorm2d(self.expansion * planes))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.bn1.eps
primals_4 = self.bn1.gamma
primals_5 = self.bn1.beta
primals_6 = self.bn1.tau
primals_7 = self.conv2.weight
primals_8 = self.bn2.eps
primals_9 = self.bn2.gamma
primals_10 = self.bn2.beta
primals_11 = self.bn2.tau
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning
|
BasicBlock
| false
| 6,231
|
[
"MIT"
] | 1
|
78aec81919bf95ed4677d0e0a4ebbbe3be455742
|
https://github.com/aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning/tree/78aec81919bf95ed4677d0e0a4ebbbe3be455742
|
Embedding
|
import torch
import numpy as np
import torch as t
import torch.nn as nn
import torch.utils.data
class Embedding(nn.Module):
"""
Redefining torch.nn.Embedding (see docs for that function)
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
_weight=None):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
if _weight is None:
self.weight = nn.Parameter(t.randn([self.num_embeddings, self.
embedding_dim]) / np.sqrt(self.num_embeddings))
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim
], 'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = nn.Parameter(_weight)
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, x):
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
return self.weight[x]
def get_inputs():
return [torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4}]
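# Hedged usage sketch: with padding_idx set, that row of the weight is
# re-zeroed (under no_grad) on every forward.
if __name__ == '__main__':
    emb = Embedding(num_embeddings=4, embedding_dim=4, padding_idx=0)
    vecs = emb(torch.tensor([0, 1, 2, 3]))
    print(vecs.shape, vecs[0])  # torch.Size([4, 4]); row 0 is all zeros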
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch as t
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(16)](primals_2, primals_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, primals_2
class EmbeddingNew(nn.Module):
"""
Redefining torch.nn.Embedding (see docs for that function)
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
_weight=None):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
if _weight is None:
self.weight = nn.Parameter(t.randn([self.num_embeddings, self.
embedding_dim]) / np.sqrt(self.num_embeddings))
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim
], 'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = nn.Parameter(_weight)
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
arjunsesh/lrr-neurips
|
Embedding
| false
| 6,232
|
[
"MIT"
] | 1
|
d78106daec1e729b02a0452f74a37bf004ed243c
|
https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c
|
Attention
|
import torch
import torch.nn.functional as F
from torch import nn
class Attention(nn.Module):
"""Attention Layer merging Encoder and Decoder
Attributes:
hidden_size (int):
The number of features in the hidden state h
Reference:
* https://github.com/bentrevett/pytorch-seq2seq
"""
def __init__(self, hidden_size):
super().__init__()
self.attn = nn.Linear(hidden_size + hidden_size, hidden_size)
self.v = nn.Linear(hidden_size, 1, bias=False)
def forward(self, hidden, encoder_outputs):
"""
Args:
hidden: previous hidden state of the decoder [batch_size, hidden_size]
encoder_outputs: outputs of encoder
[batch_size, sample_size, num_directions*hidden_size]
"""
sample_size = encoder_outputs.shape[1]
hidden = hidden.squeeze(0)
hidden = hidden.unsqueeze(1).repeat(1, sample_size, 1)
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs),
dim=2)))
attention = self.v(energy).squeeze(2)
return F.softmax(attention, dim=1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
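# Usage sketch (an illustrative addition; _attn and _w are hypothetical
# names): the returned weights are a softmax over the sample axis, so each
# row sums to one.
_attn = Attention(hidden_size=4)
_w = _attn(torch.rand(4, 4), torch.rand(4, 4, 4))
assert _w.shape == (4, 4)
assert torch.allclose(_w.sum(dim=1), torch.ones(4))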
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = xindex // 32
x3 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf2, buf5, primals_5
class AttentionNew(nn.Module):
"""Attention Layer merging Encoder and Decoder
Attributes:
hidden_size (int):
The number of features in the hidden state h
Reference:
* https://github.com/bentrevett/pytorch-seq2seq
"""
def __init__(self, hidden_size):
super().__init__()
self.attn = nn.Linear(hidden_size + hidden_size, hidden_size)
self.v = nn.Linear(hidden_size, 1, bias=False)
def forward(self, input_0, input_1):
primals_3 = self.attn.weight
primals_4 = self.attn.bias
primals_5 = self.v.weight
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
appleparan/mise.py
|
Attention
| false
| 6,233
|
[
"MIT"
] | 1
|
a77ea51be37a739928600c66d168d69b78bc0c4b
|
https://github.com/appleparan/mise.py/tree/a77ea51be37a739928600c66d168d69b78bc0c4b
|
FilterResponseNormNd
|
import torch
import torch.nn as nn
import torch.utils.data.distributed
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
assert ndim in [3, 4, 5
], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNd, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ndim': 4, 'num_features': 4}]
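# Illustrative check (an addition, not in the original source): after
# reset_parameters() gamma=1 and beta=tau=0, so the output equals
# max(x * rsqrt(nu2 + |eps|), 0) and is therefore non-negative here.
_frn = FilterResponseNormNd(ndim=4, num_features=4)
_y = _frn(torch.rand(4, 4, 4, 4))
assert _y.shape == (4, 4, 4, 4) and (_y >= 0).all()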
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0[grid(16)](buf1,
primals_1, primals_2, primals_3, primals_4, primals_5, buf2, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, primals_5, buf1
class FilterResponseNormNdNew(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
assert ndim in [3, 4, 5
], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNdNew, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
def forward(self, input_0):
primals_2 = self.eps
primals_3 = self.gamma
primals_4 = self.beta
primals_5 = self.tau
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning
|
FilterResponseNormNd
| false
| 6,234
|
[
"MIT"
] | 1
|
78aec81919bf95ed4677d0e0a4ebbbe3be455742
|
https://github.com/aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning/tree/78aec81919bf95ed4677d0e0a4ebbbe3be455742
|
PrimaryCapsules
|
import torch
import torch.nn as nn
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class PrimaryCapsules(nn.Module):
def __init__(self, in_features, capsules_num, capsules_dim):
super(PrimaryCapsules, self).__init__()
self.in_features = in_features
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.conv = nn.Conv2d(in_features, capsules_num * capsules_dim,
kernel_size=9, stride=2)
def forward(self, x):
batch_size = x.shape[0]
x = self.conv(x)
"""
Since all capsules use the same convolution operations, just do it once and reshape.
"""
x = x.view(batch_size, -1, self.capsules_dim)
x = squash(x)
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_features': 4, 'capsules_num': 4, 'capsules_dim': 4}]
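# Illustrative check (an addition): squash rescales each capsule vector to
# length roughly n / (n + 1) < 1, where n is its squared norm, so every
# output capsule lies strictly inside the unit ball.
_caps = PrimaryCapsules(in_features=4, capsules_num=4, capsules_dim=4)
_out = _caps(torch.rand(4, 4, 64, 64))
assert _out.shape == (4, 3136, 4)  # (64 - 9) // 2 + 1 = 28; 16 * 28 * 28 / 4 = 3136
assert (_out.norm(dim=-1) < 1.0).all()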
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (16, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_3, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(50176)](buf1, primals_3, 50176,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 3136, 4), (12544, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_1[grid(50176)](buf1, buf2,
50176, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf1
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class PrimaryCapsulesNew(nn.Module):
def __init__(self, in_features, capsules_num, capsules_dim):
super(PrimaryCapsulesNew, self).__init__()
self.in_features = in_features
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.conv = nn.Conv2d(in_features, capsules_num * capsules_dim,
kernel_size=9, stride=2)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashawkey/CapsNet.pytorch
|
PrimaryCapsules
| false
| 6,235
|
[
"MIT"
] | 1
|
3b796b572bbabe79cc445c35913cd3584733aedf
|
https://github.com/ashawkey/CapsNet.pytorch/tree/3b796b572bbabe79cc445c35913cd3584733aedf
|
MNL
|
import torch
import torch.nn as nn
import torch.utils.data
class MNL(nn.Module):
"""
Implementation of MNL choice model as a Pytorch module
"""
def __init__(self, n):
super(MNL, self).__init__()
        self.u = nn.Parameter(torch.nn.init.normal_(torch.Tensor(n)))
self.n = n
self.m = nn.Softmax()
def forward(self, x):
"""
        computes the MNL choice probabilities P(.,S)
Args:
x- indicator vector for choice set S, i.e. a 'size(S)-hot' encoding of the
choice set, or a batch of such encodings
"""
u = x * self.u + (1 - x) * -16
p = self.m(u)
return torch.log(p / torch.sum(p))
def __str__(self):
return 'MNL'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4}]
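# Usage sketch (an illustrative addition; _mnl and _x are hypothetical
# names): items outside the choice set receive utility -16, which pushes
# their probability toward zero. nn.Softmax() without an explicit dim warns
# but infers dim=1 on 2-D input.
_mnl = MNL(n=4)
_x = torch.tensor([[1.0, 1.0, 0.0, 0.0]])  # choice set {0, 1}, batch of one
_logp = _mnl(_x)
assert _logp.shape == (1, 4)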
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp20 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp3 - tmp8
tmp11 = tmp10 * tmp5
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp3 - tmp14
tmp17 = tmp16 * tmp5
tmp18 = tmp15 + tmp17
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp21 = tmp20 * tmp1
tmp22 = tmp3 - tmp20
tmp23 = tmp22 * tmp5
tmp24 = tmp21 + tmp23
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tmp26 = tmp7 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp12 - tmp25
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp18 - tmp25
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp24 - tmp25
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tl.store(out_ptr0 + x4, tmp25, xmask)
tl.store(out_ptr1 + x4, tmp36, xmask)
@triton.jit
def triton_per_fused__softmax_add_div_log_mul_rsub_sum_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r4 = rindex
r0 = rindex % 4
r3 = rindex // 64
r5 = rindex % 16
tmp0 = tl.load(in_ptr0 + r4, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (r5 + 16 * r3), None, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr3 + (r5 + 16 * r3), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp12 / tmp15
tmp17 = tl_math.log(tmp16)
tl.store(out_ptr1 + tl.broadcast_to(r4, [RBLOCK]), tmp17, None)
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_mul_rsub_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__softmax_add_div_log_mul_rsub_sum_1[grid(1)](primals_2
, primals_1, buf0, buf1, buf2, buf3, 1, 256, num_warps=2,
num_stages=1)
del buf0
del buf1
return buf3, primals_1, primals_2, buf2
class MNLNew(nn.Module):
"""
    Implementation of the MNL choice model as a PyTorch module
"""
def __init__(self, n):
super(MNLNew, self).__init__()
        self.u = nn.Parameter(torch.nn.init.normal_(torch.Tensor(n)))
self.n = n
self.m = nn.Softmax()
def __str__(self):
return 'MNL'
def forward(self, input_0):
primals_1 = self.u
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
arjunsesh/lrr-neurips
|
MNL
| false
| 6,236
|
[
"MIT"
] | 1
|
d78106daec1e729b02a0452f74a37bf004ed243c
|
https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c
|
ConcatELU
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConcatELU(nn.Module):
"""Activation function that applies ELU in both direction (inverted and plain).
Allows non-linearity while providing strong gradients for any input (important for final convolution)
"""
def forward(self, x):
return torch.cat([F.elu(x), F.elu(-x)], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
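# Illustrative check (an addition): the channel dimension doubles from 4 to
# 8, and since elu(z) > -1 for every z, all outputs are bounded below by -1.
_y = ConcatELU()(torch.rand(4, 4, 4, 4))
assert _y.shape == (4, 8, 4, 4) and (_y > -1).all()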
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp18 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp15 &
xmask, other=0.0)
tmp19 = -tmp18
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp15, tmp24, tmp25)
tmp27 = tl.where(tmp4, tmp14, tmp26)
tl.store(out_ptr0 + x3, tmp27, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ConcatELUNew(nn.Module):
"""Activation function that applies ELU in both direction (inverted and plain).
Allows non-linearity while providing strong gradients for any input (important for final convolution)
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
ConcatELU
| false
| 6,237
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
MLP
|
import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLP(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
n_in (~torch.Tensor):
The size of each input feature.
n_out (~torch.Tensor):
The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
x (~torch.Tensor):
The size of each input feature is `n_in`.
Returns:
A tensor with the size of each output feature `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
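# Usage sketch (an addition for clarity): outside training SharedDropout is
# the identity, so in eval mode the MLP reduces to LeakyReLU(x @ W.T + b)
# with negative slope 0.1.
_mlp = MLP(n_in=4, n_out=4).eval()
_y = _mlp(torch.rand(4, 4, 4, 4))
assert _y.shape == (4, 4, 4, 4)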
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLPNew(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
n_in (~torch.Tensor):
The size of each input feature.
n_out (~torch.Tensor):
The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashim95/parser
|
MLP
| false
| 6,238
|
[
"MIT"
] | 1
|
61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
BP
|
import torch
import torch.nn as nn
import torch.utils.data
class BP(nn.Module):
"""
    Implementation of the Batsell-Polking k-th order model as a PyTorch module
"""
def __init__(self, n, k, d):
"""
Initializes a k-th order Batsell-Polking model
Args:
n- number of items in universe
k- order of the model
d- rank of the model
"""
super(BP, self).__init__()
shape = tuple([n] * k)
self.U = nn.Parameter(torch.nn.init.normal_(torch.Tensor(*shape)))
self.k = k
self.n = n
self.d = d
self.m = nn.LogSoftmax()
def forward(self, x):
"""
Computes choice probabilities for k-th order BP model
"""
utils = self.U
for _ in range(self.k - 1):
utils = torch.matmul(utils, torch.squeeze(x))
utils = x * utils + (1 - x) * -16
return self.m(utils)
def __str__(self):
return 'BP:k=' + str(self.k)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4, 'k': 4, 'd': 4}]
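# Usage sketch (an illustrative addition): for k=4 the utility tensor U is
# folded against the choice-set indicator by three chained matmuls before
# the -16 masking and the log-softmax.
_bp = BP(n=4, k=4, d=4)
_logp = _bp(torch.rand(4, 4, 4, 4))
assert _logp.shape == (4, 4, 4, 4)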
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp3 - tmp8
tmp12 = tmp11 * tmp5
tmp13 = tmp10 + tmp12
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp3 - tmp15
tmp19 = tmp18 * tmp5
tmp20 = tmp17 + tmp19
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp22 * tmp23
tmp25 = tmp3 - tmp22
tmp26 = tmp25 * tmp5
tmp27 = tmp24 + tmp26
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp39, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_mul_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp8 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp11 = tl_math.log(tmp10)
tmp12 = tmp9 - tmp11
tl.store(in_out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(primals_2, (16, 4, 4),
(16, 4, 1), 0), out=buf1)
buf2 = buf0
del buf0
extern_kernels.bmm(buf1, reinterpret_tensor(primals_2, (16, 4, 4),
(16, 4, 1), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_add_mul_rsub_0[grid(64)](primals_2,
buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_add_mul_rsub_1[grid(256)](buf5,
primals_2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del buf4
return buf5, primals_2, buf5
class BPNew(nn.Module):
"""
    Implementation of the Batsell-Polking k-th order model as a PyTorch module
"""
def __init__(self, n, k, d):
"""
Initializes a k-th order Batsell-Polking model
Args:
n- number of items in universe
k- order of the model
d- rank of the model
"""
super(BPNew, self).__init__()
shape = tuple([n] * k)
self.U = nn.Parameter(torch.nn.init.normal_(torch.Tensor(*shape)))
self.k = k
self.n = n
self.d = d
self.m = nn.LogSoftmax()
def __str__(self):
return 'BP:k=' + str(self.k)
def forward(self, input_0):
primals_1 = self.U
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
arjunsesh/lrr-neurips
|
BP
| false
| 6,239
|
[
"MIT"
] | 1
|
d78106daec1e729b02a0452f74a37bf004ed243c
|
https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c
|
ScalarMix
|
import torch
import torch.nn as nn
class ScalarMix(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, tensors):
"""
Args:
tensors (list[~torch.Tensor]):
:math:`N` tensors to be mixed.
Returns:
The mixture of :math:`N` tensors.
"""
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1}]
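# Illustrative check (an addition): with n_layers=1 the softmax weight is
# exactly 1 and gamma starts at 1.0, so the initial mixture returns the
# single input tensor unchanged.
_mix = ScalarMix(n_layers=1)
_x = torch.rand(4, 4, 4, 4)
assert torch.allclose(_mix([_x]), _x)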
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(64)](primals_3, primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4,
4), (16, 4, 1), 0)
class ScalarMixNew(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, input_0):
primals_1 = self.weights
primals_3 = self.gamma
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashim95/parser
|
ScalarMix
| false
| 6,240
|
[
"MIT"
] | 1
|
61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
CapsuleLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CapsuleLoss(nn.Module):
def __init__(self):
super(CapsuleLoss, self).__init__()
def forward(self, inputs, labels, logits, recons):
batch_size = inputs.shape[0]
left = F.relu(0.9 - logits, inplace=True) ** 2
right = F.relu(logits - 0.1, inplace=True) ** 2
margin_loss = labels * left + 0.5 * (1.0 - labels) * right
margin_loss = margin_loss.sum()
recons_loss = ((inputs.view(batch_size, -1) - recons) ** 2).sum()
return (margin_loss + 0.0005 * recons_loss) / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 64]), torch.rand([4, 4, 4, 4]), torch.rand
([4, 4, 4, 4]), torch.rand([4, 4, 4, 1024])]
def get_init_inputs():
return [[], {}]
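# Usage sketch (an addition): a margin loss on the logits plus a
# 0.0005-weighted reconstruction MSE, divided by the batch size; both terms
# are non-negative, so the scalar result is too.
_loss = CapsuleLoss()(torch.rand(4, 4, 4, 64), torch.rand(4, 4, 4, 4),
                      torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 1024))
assert _loss.dim() == 0 and _loss.item() >= 0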
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp5 * tmp5
tmp7 = tmp0 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp0
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.1
tmp13 = tmp1 - tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp11 * tmp15
tmp17 = tmp7 + tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
@triton.jit
def triton_red_fused_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 8
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + r1 % 4096, rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = _tmp5 + tmp4
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = tl.sum(_tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_2(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_out_ptr0 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 0.0005
tmp7 = tmp3 * tmp6
tmp8 = tmp5 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 64), (1024, 256, 64, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 1024), (16384, 4096, 1024, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0[grid(1)](arg2_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((8,), (1,), torch.float32)
triton_red_fused_pow_sub_sum_1[grid(8)](arg0_1, arg3_1, buf1, 8,
8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del arg0_1
del arg3_1
buf3 = buf0
del buf0
triton_per_fused_add_div_mul_pow_sub_sum_2[grid(1)](buf3, buf1, 1,
8, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf3,
class CapsuleLossNew(nn.Module):
def __init__(self):
super(CapsuleLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
ashawkey/CapsNet.pytorch
|
CapsuleLoss
| false
| 6,241
|
[
"MIT"
] | 1
|
3b796b572bbabe79cc445c35913cd3584733aedf
|
https://github.com/ashawkey/CapsNet.pytorch/tree/3b796b572bbabe79cc445c35913cd3584733aedf
|
BCEIoULoss
|
import torch
from typing import Callable
from functools import partial
from torch import nn
import torch.distributed
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.backends
def get_activation_fn(activation: 'str'=None):
"""Returns the activation function from ``torch.nn`` by its name."""
if activation is None or activation.lower() == 'none':
def activation_fn(x):
return x
else:
activation_fn = torch.nn.__dict__[activation]()
return activation_fn
def wrap_metric_fn_with_activation(metric_fn: 'Callable', activation: 'str'
=None):
"""Wraps model outputs for ``metric_fn` with specified ``activation``.
Args:
metric_fn: metric function to compute
activation: activation name to use
Returns:
wrapped metric function with wrapped model outputs
.. note::
Works only with ``metric_fn`` like
``metric_fn(outputs, targets, *args, **kwargs)``.
"""
activation_fn = get_activation_fn(activation)
def wrapped_metric_fn(outputs: 'torch.Tensor', targets: 'torch.Tensor',
*args, **kwargs):
outputs = activation_fn(outputs)
output = metric_fn(outputs, targets, *args, **kwargs)
return output
return wrapped_metric_fn
def iou(outputs: 'torch.Tensor', targets: 'torch.Tensor', class_dim: 'int'=
1, threshold: 'float'=None, eps: 'float'=1e-07) ->torch.Tensor:
"""Computes the iou/jaccard score.
Args:
outputs: [N; K; ...] tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
        targets: binary [N; K; ...] tensor that encodes which of the K
classes are associated with the N-th input
        class_dim: indicates class dimension (K) for
``outputs`` and ``targets`` tensors (default = 1)
threshold: threshold for outputs binarization
eps: epsilon to avoid zero division
Returns:
IoU (Jaccard) score
Examples:
>>> size = 4
>>> half_size = size // 2
>>> shape = (1, 1, size, size)
>>> empty = torch.zeros(shape)
>>> full = torch.ones(shape)
>>> left = torch.ones(shape)
>>> left[:, :, :, half_size:] = 0
>>> right = torch.ones(shape)
>>> right[:, :, :, :half_size] = 0
>>> top_left = torch.zeros(shape)
>>> top_left[:, :, :half_size, :half_size] = 1
>>> pred = torch.cat([empty, left, empty, full, left, top_left], dim=1)
>>> targets = torch.cat([full, right, empty, full, left, left], dim=1)
>>> iou(
>>> outputs=pred,
>>> targets=targets,
>>> class_dim=1,
>>> threshold=0.5,
>>> )
        tensor([0.0000, 0.0000, 1.0000, 1.0000, 1.0000, 0.5000])
"""
if threshold is not None:
outputs = (outputs > threshold).float()
num_dims = len(outputs.shape)
assert num_dims > 2, 'shape mismatch, please check the docs for more info'
assert outputs.shape == targets.shape, 'shape mismatch, please check the docs for more info'
dims = list(range(num_dims))
if class_dim < 0:
class_dim = num_dims + class_dim
dims.pop(class_dim)
sum_fn = partial(torch.sum, dim=dims)
intersection = sum_fn(targets * outputs)
union = sum_fn(targets) + sum_fn(outputs)
iou_score = (intersection + eps * (union == 0).float()) / (union -
intersection + eps)
return iou_score
class IoULoss(nn.Module):
"""The intersection over union (Jaccard) loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
"""
super().__init__()
metric_fn = wrap_metric_fn_with_activation(metric_fn=iou,
activation=activation)
self.loss_fn = partial(metric_fn, eps=eps, threshold=threshold)
def forward(self, outputs, targets):
"""@TODO: Docs. Contribution is welcome."""
per_class_iou = self.loss_fn(outputs, targets)
iou = torch.mean(per_class_iou)
return 1 - iou
class BCEIoULoss(nn.Module):
"""The Intersection over union (Jaccard) with BCE loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid', reduction: 'str'='mean'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
reduction: Specifies the reduction to apply
to the output of BCE
"""
super().__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
self.iou_loss = IoULoss(eps, threshold, activation)
def forward(self, outputs, targets):
"""@TODO: Docs. Contribution is welcome."""
iou = self.iou_loss.forward(outputs, targets)
bce = self.bce_loss(outputs, targets)
loss = iou + bce
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
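# Usage sketch (an illustrative addition): the loss is
# (1 - mean per-class IoU after sigmoid) + BCEWithLogits, returned as a
# single scalar tensor.
_loss = BCEIoULoss()(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert _loss.dim() == 0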
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import Callable
from functools import partial
from torch import nn
import torch.distributed
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp3 = tmp1 + tmp2
tmp4 = 0.0
tmp5 = tmp3 == tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = 1e-07
tmp8 = tmp6 * tmp7
tmp9 = tmp0 + tmp8
tmp10 = tmp3 - tmp0
tmp11 = tmp10 + tmp7
tmp12 = tmp9 / tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
@triton.jit
def triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp16 = tl.load(in_out_ptr0 + 0)
tmp17 = tl.broadcast_to(tmp16, [1])
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp18 = 4.0
tmp19 = tmp17 / tmp18
tmp20 = tmp1 - tmp19
tmp21 = 256.0
tmp22 = tmp15 / tmp21
tmp23 = tmp20 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sigmoid_sum_0[grid(4)](arg1_1, arg0_1, buf0,
buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1[grid(1)](buf0,
buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
buf5 = buf3
del buf3
triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2[
grid(1)](buf5, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf5,
def get_activation_fn(activation: 'str'=None):
"""Returns the activation function from ``torch.nn`` by its name."""
if activation is None or activation.lower() == 'none':
def activation_fn(x):
return x
else:
activation_fn = torch.nn.__dict__[activation]()
return activation_fn
def wrap_metric_fn_with_activation(metric_fn: 'Callable', activation: 'str'
=None):
"""Wraps model outputs for ``metric_fn` with specified ``activation``.
Args:
metric_fn: metric function to compute
activation: activation name to use
Returns:
wrapped metric function with wrapped model outputs
.. note::
Works only with ``metric_fn`` like
``metric_fn(outputs, targets, *args, **kwargs)``.
"""
activation_fn = get_activation_fn(activation)
def wrapped_metric_fn(outputs: 'torch.Tensor', targets: 'torch.Tensor',
*args, **kwargs):
outputs = activation_fn(outputs)
output = metric_fn(outputs, targets, *args, **kwargs)
return output
return wrapped_metric_fn
def iou(outputs: 'torch.Tensor', targets: 'torch.Tensor', class_dim: 'int'=
1, threshold: 'float'=None, eps: 'float'=1e-07) ->torch.Tensor:
"""Computes the iou/jaccard score.
Args:
outputs: [N; K; ...] tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
        targets: binary [N; K; ...] tensor that encodes which of the K
classes are associated with the N-th input
class_dim: indicates class dimention (K) for
``outputs`` and ``targets`` tensors (default = 1)
threshold: threshold for outputs binarization
eps: epsilon to avoid zero division
Returns:
IoU (Jaccard) score
Examples:
>>> size = 4
>>> half_size = size // 2
>>> shape = (1, 1, size, size)
>>> empty = torch.zeros(shape)
>>> full = torch.ones(shape)
>>> left = torch.ones(shape)
>>> left[:, :, :, half_size:] = 0
>>> right = torch.ones(shape)
>>> right[:, :, :, :half_size] = 0
>>> top_left = torch.zeros(shape)
>>> top_left[:, :, :half_size, :half_size] = 1
>>> pred = torch.cat([empty, left, empty, full, left, top_left], dim=1)
>>> targets = torch.cat([full, right, empty, full, left, left], dim=1)
>>> iou(
>>> outputs=pred,
>>> targets=targets,
>>> class_dim=1,
>>> threshold=0.5,
>>> )
        tensor([0.0000, 0.0000, 1.0000, 1.0000, 1.0000, 0.5000])
"""
if threshold is not None:
outputs = (outputs > threshold).float()
num_dims = len(outputs.shape)
assert num_dims > 2, 'shape mismatch, please check the docs for more info'
assert outputs.shape == targets.shape, 'shape mismatch, please check the docs for more info'
dims = list(range(num_dims))
if class_dim < 0:
class_dim = num_dims + class_dim
dims.pop(class_dim)
sum_fn = partial(torch.sum, dim=dims)
intersection = sum_fn(targets * outputs)
union = sum_fn(targets) + sum_fn(outputs)
iou_score = (intersection + eps * (union == 0).float()) / (union -
intersection + eps)
return iou_score
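# Illustrative sanity check (not part of the original module; the helper name
# `_iou_smoke_test` is hypothetical): a prediction identical to its target
# should score ~1.0 for that class, and a fully disjoint one ~0.0, up to `eps`.
def _iou_smoke_test():
    t = torch.zeros(1, 2, 4, 4)
    t[:, 0, :2] = 1  # class 0 target occupies the top half
    p = t.clone()
    p[:, 1, 2:] = 1  # class 1 predicted where the target is empty
    scores = iou(outputs=p, targets=t, class_dim=1, threshold=0.5)
    assert torch.allclose(scores, torch.tensor([1.0, 0.0]), atol=1e-05)
_iou_smoke_test()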
class IoULoss(nn.Module):
"""The intersection over union (Jaccard) loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
"""
super().__init__()
metric_fn = wrap_metric_fn_with_activation(metric_fn=iou,
activation=activation)
self.loss_fn = partial(metric_fn, eps=eps, threshold=threshold)
def forward(self, outputs, targets):
"""@TODO: Docs. Contribution is welcome."""
per_class_iou = self.loss_fn(outputs, targets)
iou = torch.mean(per_class_iou)
return 1 - iou
class BCEIoULossNew(nn.Module):
"""The Intersection over union (Jaccard) with BCE loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid', reduction: 'str'='mean'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
reduction: Specifies the reduction to apply
to the output of BCE
"""
super().__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
self.iou_loss = IoULoss(eps, threshold, activation)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
arjunshibu/catalyst
|
BCEIoULoss
| false
| 6,242
|
[
"Apache-2.0"
] | 1
|
7160540f09530b803e5664e57db3e951fdc4dab3
|
https://github.com/arjunshibu/catalyst/tree/7160540f09530b803e5664e57db3e951fdc4dab3
|
Sigmoid
|
import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Sigmoid(ActivationFunction):
def forward(self, x):
return 1 / (1 + torch.exp(-x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
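# Hedged sketch (assumption: the manual 1 / (1 + exp(-x)) above should agree
# with torch.sigmoid up to float32 rounding). Variable names are illustrative.
_x = torch.rand([4, 4, 4, 4])
assert torch.allclose(Sigmoid()(_x), torch.sigmoid(_x), atol=1e-06)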
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_mul_neg_reciprocal_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tl_math.exp(tmp1)
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tmp6 * tmp3
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_mul_neg_reciprocal_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class SigmoidNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
Sigmoid
| false
| 6,243
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
L2Norm
|
import torch
import torch.nn as nn
class L2Norm(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = x / norm * self.weight.view(1, -1, 1, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
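# Hedged sketch (illustrative, not part of the module): with scale=1.0 the
# channel-wise L2 norm at every spatial position should come out ~1.0 (the
# `eps` in the denominator makes it fractionally smaller).
with torch.no_grad():
    _y = L2Norm(n_channels=4, scale=1.0)(torch.rand([4, 4, 4, 4]))
    assert torch.allclose(_y.pow(2).sum(dim=1).sqrt(), torch.ones(4, 4, 4), atol=1e-04)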
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
ashuk203/face-alignment
|
L2Norm
| false
| 6,244
|
[
"BSD-3-Clause"
] | 1
|
1f6452ae05ede0db9bbc48331d67d8b239fa9994
|
https://github.com/ashuk203/face-alignment/tree/1f6452ae05ede0db9bbc48331d67d8b239fa9994
|
Biaffine
|
import torch
import torch.nn as nn
class Biaffine(nn.Module):
"""
Biaffine layer for first-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
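# Hedged shape check (illustrative): with bias_x=bias_y=True and n_out=2, the
# einsum 'bxi,oij,byj->boxy' yields [batch, n_out, seq_len, seq_len], and no
# squeeze happens since n_out > 1; the zero-initialised weight gives all-zero scores.
_b = Biaffine(n_in=4, n_out=2)
_s = _b(torch.rand(3, 5, 4), torch.rand(3, 5, 4))
assert _s.shape == (3, 2, 5, 5)
assert torch.all(_s == 0)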
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
"""
Biaffine layer for first-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashim95/parser
|
Biaffine
| false
| 6,245
|
[
"MIT"
] | 1
|
61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
CDM
|
import torch
import torch.nn as nn
import torch.utils.data
class CDM(nn.Module):
"""
    Implementation of the CDM choice model as a PyTorch module
"""
def __init__(self, n, d):
"""
Initializes a CDM model
Args:
n- number of items in the universe
d- number of dimensions for feature and context embeddings
"""
super(CDM, self).__init__()
self.fs = nn.Parameter(torch.nn.init.normal(torch.Tensor(n, d)))
self.cs = nn.Parameter(torch.nn.init.normal(torch.Tensor(d, n)))
self.m = nn.LogSoftmax()
self.d = d
self.n = n
def forward(self, x):
"""
computes the CDM choice probabilities P(.,S)
Args:
x- indicator vector for choice set S, i.e. a 'size(S)-hot' encoding of the
choice set, or a batch of these
"""
u = x * torch.sum(torch.mm(self.fs, x * self.cs), dim=1) + (1 - x
) * -16
p = self.m(u)
return p
def __str__(self):
return 'CDM-d=' + str(self.d)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n': 4, 'd': 4}]
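# Hedged usage sketch (illustrative): x is a 'size(S)-hot' mask, so items
# outside S receive utility -16 before the log-softmax and end up with
# near-zero probability; the probabilities over all items still sum to 1.
_logp = CDM(n=4, d=4)(torch.tensor([[1.0, 1.0, 0.0, 0.0]]))  # S = {0, 1}
assert torch.allclose(_logp.exp().sum(dim=1), torch.ones(1), atol=1e-05)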
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp0
tmp11 = -16.0
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_3, primals_2, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sum_1[grid(16)](primals_3, buf1, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__log_softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__log_softmax_3[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
return buf4, primals_3, buf4, reinterpret_tensor(primals_1, (4, 4), (1,
4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class CDMNew(nn.Module):
"""
    Implementation of the CDM choice model as a PyTorch module
"""
def __init__(self, n, d):
"""
Initializes a CDM model
Args:
n- number of items in the universe
d- number of dimensions for feature and context embeddings
"""
super(CDMNew, self).__init__()
self.fs = nn.Parameter(torch.nn.init.normal(torch.Tensor(n, d)))
self.cs = nn.Parameter(torch.nn.init.normal(torch.Tensor(d, n)))
self.m = nn.LogSoftmax()
self.d = d
self.n = n
def __str__(self):
return 'CDM-d=' + str(self.d)
def forward(self, input_0):
primals_1 = self.fs
primals_2 = self.cs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
arjunsesh/lrr-neurips
|
CDM
| false
| 6,246
|
[
"MIT"
] | 1
|
d78106daec1e729b02a0452f74a37bf004ed243c
|
https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c
|
Tanh
|
import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Tanh(ActivationFunction):
def forward(self, x):
x_exp, neg_x_exp = torch.exp(x), torch.exp(-x)
return (x_exp - neg_x_exp) / (x_exp + neg_x_exp)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
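# Hedged check (assumption: the explicit (e^x - e^-x) / (e^x + e^-x) form above
# matches torch.tanh for moderate inputs; very large |x| would overflow exp).
_x = torch.randn([4, 4, 4, 4])
assert torch.allclose(Tanh()(_x), torch.tanh(_x), atol=1e-06)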
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_exp_neg_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp1 + tmp3
tmp6 = tmp4 / tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_exp_neg_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class TanhNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
Tanh
| false
| 6,247
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
LayerNorm
|
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import *
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=0.0001):
super(LayerNorm, self).__init__()
self.alpha = Parameter(torch.ones(1, 1, hidden_size))
self.beta = Parameter(torch.zeros(1, 1, hidden_size))
self.eps = eps
def forward(self, x):
mu = torch.mean(x, 2, keepdim=True).expand_as(x)
sigma = torch.std(x, 2, keepdim=True).expand_as(x)
return (x - mu) / (sigma + self.eps) * self.alpha.expand_as(x
) + self.beta.expand_as(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
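# Hedged statistics check (illustrative): with alpha=1 and beta=0 the output
# should have ~zero mean along dim 2, and ~unit std since torch.std here is the
# same unbiased estimator the layer itself uses.
with torch.no_grad():
    _y = LayerNorm(hidden_size=4)(torch.randn([4, 4, 4, 4]))
    assert _y.mean(dim=2).abs().max() < 1e-04
    assert (_y.std(dim=2) - 1).abs().max() < 1e-02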
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.optim.lr_scheduler import *
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 0.0001
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
def __init__(self, hidden_size, eps=0.0001):
super(LayerNormNew, self).__init__()
self.alpha = Parameter(torch.ones(1, 1, hidden_size))
self.beta = Parameter(torch.zeros(1, 1, hidden_size))
self.eps = eps
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashishbaghudana/san_mrc
|
LayerNorm
| false
| 6,248
|
[
"BSD-3-Clause"
] | 1
|
03ed7d94c735f1fe2854bb9c208385b5fde44905
|
https://github.com/ashishbaghudana/san_mrc/tree/03ed7d94c735f1fe2854bb9c208385b5fde44905
|
ReLU
|
import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ReLU(ActivationFunction):
def forward(self, x):
return x * (x > 0).float()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
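# Hedged check (illustrative): x * (x > 0).float() is exactly the standard
# ReLU, so it should coincide with torch.relu.
_x = torch.randn([4, 4, 4, 4])
assert torch.allclose(ReLU()(_x), torch.relu(_x))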
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ReLUNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
ReLU
| false
| 6,249
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
ELU
|
import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ELU(ActivationFunction):
def forward(self, x):
return torch.where(x > 0, x, torch.exp(x) - 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
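# Hedged check (illustrative): torch.where(x > 0, x, exp(x) - 1) is ELU with
# alpha=1, so it should agree with torch.nn.functional.elu.
import torch.nn.functional as F
_x = torch.randn([4, 4, 4, 4])
assert torch.allclose(ELU()(_x), F.elu(_x), atol=1e-06)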
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_gt_sub_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_gt_sub_where_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ELUNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
ELU
| false
| 6,250
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
LeakyReLU
|
import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class LeakyReLU(ActivationFunction):
def __init__(self, alpha=0.1):
super().__init__()
self.config['alpha'] = alpha
def forward(self, x):
return torch.where(x > 0, x, self.config['alpha'] * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
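# Hedged check (illustrative): with alpha=0.1 this matches
# F.leaky_relu(x, negative_slope=0.1).
import torch.nn.functional as F
_x = torch.randn([4, 4, 4, 4])
assert torch.allclose(LeakyReLU(alpha=0.1)(_x), F.leaky_relu(_x, negative_slope=0.1))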
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gt_mul_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.1
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gt_mul_where_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class LeakyReLUNew(ActivationFunction):
def __init__(self, alpha=0.1):
super().__init__()
self.config['alpha'] = alpha
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
LeakyReLU
| false
| 6,251
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
MultiplyLuminance
|
import torch
import torch.nn
class MultiplyLuminance(torch.nn.Module):
def __init__(self):
super(MultiplyLuminance, self).__init__()
def forward(self, color, luminance):
return color * (1 + luminance)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](arg1_1, arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MultiplyLuminanceNew(torch.nn.Module):
def __init__(self):
super(MultiplyLuminanceNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ashwinpn/Computer-Vision
|
MultiplyLuminance
| false
| 6,252
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|
GCNLayer
|
import torch
import torch.nn as nn
class GCNLayer(nn.Module):
def __init__(self, c_in, c_out):
super().__init__()
self.projection = nn.Linear(c_in, c_out)
def forward(self, node_feats, adj_matrix):
"""
Args:
node_feats: Tensor with node features of shape [batch_size, num_nodes, c_in]
adj_matrix: Batch of adjacency matrices of the graph. If there is an edge from i to j,
                adj_matrix[b,i,j]=1 else 0. Supports directed edges via non-symmetric matrices.
                Assumes the identity connections have already been added.
Shape: [batch_size, num_nodes, num_nodes]
"""
num_neighbours = adj_matrix.sum(dim=-1, keepdims=True)
node_feats = self.projection(node_feats)
node_feats = torch.bmm(adj_matrix, node_feats)
node_feats = node_feats / num_neighbours
return node_feats
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'c_out': 4}]
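# Hedged sketch (illustrative): each output row averages the projected features
# of a node's neighbourhood. In a 2-node graph that is fully connected
# (self-loops included), both nodes see the same neighbourhood, so their
# outputs coincide.
_layer = GCNLayer(c_in=4, c_out=4)
_out = _layer(torch.rand(1, 2, 4), torch.ones(1, 2, 2))
assert torch.allclose(_out[0, 0], _out[0, 1], atol=1e-06)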
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 4), (
16, 4, 1), 0), out=buf1)
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(64)](buf2, primals_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf2, primals_1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0)
class GCNLayerNew(nn.Module):
def __init__(self, c_in, c_out):
super().__init__()
self.projection = nn.Linear(c_in, c_out)
def forward(self, input_0, input_1):
primals_2 = self.projection.weight
primals_3 = self.projection.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ashutoshml/lightning-tutorials
|
GCNLayer
| false
| 6,253
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
FillUpLuminance
|
import torch
import torch.nn
class FillUpLuminance(torch.nn.Module):
def __init__(self):
super(FillUpLuminance, self).__init__()
def forward(self, color, luminance):
return color + (1 - color) * luminance
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
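# Hedged note (illustrative): color + (1 - color) * luminance linearly
# interpolates every channel toward 1.0, i.e. it equals
# torch.lerp(color, ones_like(color), luminance).
_c, _l = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])
assert torch.allclose(FillUpLuminance()(_c, _l), torch.lerp(_c, torch.ones_like(_c), _l))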
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FillUpLuminanceNew(torch.nn.Module):
def __init__(self):
super(FillUpLuminanceNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ashwinpn/Computer-Vision
|
FillUpLuminance
| false
| 6,254
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|
Triaffine
|
import torch
import torch.nn as nn
class Triaffine(nn.Module):
"""
Triaffine layer for second-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y, z):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
z (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, seq_len, seq_len, seq_len]``.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
w = torch.einsum('bzk,ikj->bzij', z, self.weight)
s = torch.einsum('bxi,bzij,byj->bzxy', x, w, y)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_in': 4}]
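# Hedged shape check (illustrative): the two einsums above chain to
# [batch_size, seq_len, seq_len, seq_len]; the zero-initialised weight makes
# every score zero.
_t = Triaffine(n_in=4)
_s = _t(torch.rand(2, 5, 4), torch.rand(2, 5, 4), torch.rand(2, 5, 4))
assert _s.shape == (2, 5, 5, 5)
assert torch.all(_s == 0)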
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0),
out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1),
torch.float32)
triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16),
(64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1, 1), (64, 16, 4, 1,
1, 1), 0)
del buf2
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16),
(64, 16, 1), 0), out=buf5)
del buf4
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 1, 16), 0
), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
class TriaffineNew(nn.Module):
"""
Triaffine layer for second-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ashim95/parser
|
Triaffine
| false
| 6,255
|
[
"MIT"
] | 1
|
61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199
|
AvgReducePool1d
|
import torch
from torch import nn
class AvgReducePool1d(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
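# Hedged equivalence check (illustrative): reducing dim 2 entirely matches
# AvgPool1d with kernel_size equal to the input length, after squeezing.
_x = torch.rand(4, 4, 8)
assert torch.allclose(AvgReducePool1d()(_x), nn.AvgPool1d(kernel_size=8)(_x).squeeze(-1))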
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AvgReducePool1dNew(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
atif93/texar-pytorch
|
AvgReducePool1d
| false
| 6,256
|
[
"Apache-2.0"
] | 1
|
88163619ec69382e1bbe57fa8bce06260bfc76a2
|
https://github.com/atif93/texar-pytorch/tree/88163619ec69382e1bbe57fa8bce06260bfc76a2
|
CoSirenModule
|
import math
import torch
import torch.nn
class CoSirenModule(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(CoSirenModule, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
init_bounds = math.sqrt(24 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, x):
x = self.linear(x)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1) - math.pi / 4
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
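# Hedged sketch (illustrative): the linear layer emits out_features // 2 values
# that are duplicated through the sin/cos concatenation, so out_features should
# be even; after subtracting pi/4 the outputs lie in [-1 - pi/4, 1 - pi/4].
_y = CoSirenModule(in_features=4, out_features=8)(torch.rand([4, 4, 4, 4]))
assert _y.shape == (4, 4, 4, 8)
assert _y.min() >= -1 - math.pi / 4
assert _y.max() <= 1 - math.pi / 4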
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tmp17 = 0.7853981633974483
tmp18 = tmp16 - tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class CoSirenModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(CoSirenModuleNew, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
init_bounds = math.sqrt(24 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashwinpn/Computer-Vision
|
CoSirenModule
| false
| 6,257
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|
PotCoSirenModule
|
import torch
import torch.nn
class PotCoSirenModule(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(PotCoSirenModule, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b=
weight_multiplier)
self.linear.weight.data = 2 ** self.linear.weight.data
def forward(self, x):
x = self.linear(x)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
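# Hedged check (illustrative): the weights are exponentiated (2 ** u), hence
# strictly positive, and the sin/cos concatenation keeps outputs in [-1, 1].
_m = PotCoSirenModule(in_features=4, out_features=8)
assert (_m.linear.weight > 0).all()
_y = _m(torch.rand([4, 4, 4, 4]))
assert _y.shape == (4, 4, 4, 8)
assert _y.abs().max() <= 1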
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class PotCoSirenModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(PotCoSirenModuleNew, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b=
weight_multiplier)
self.linear.weight.data = 2 ** self.linear.weight.data
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashwinpn/Computer-Vision
|
PotCoSirenModule
| false
| 6,258
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|
Autoencoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
self.conv1 = nn.Conv2d(3, 6, padding=2, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(4, stride=1, return_indices=True)
self.conv2 = nn.Conv2d(6, 16, padding=2, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(2, stride=1, return_indices=True)
self.unpool2 = nn.MaxUnpool2d(2, stride=1)
self.unconv2 = nn.ConvTranspose2d(16, 6, padding=2, kernel_size=5)
self.unpool1 = nn.MaxUnpool2d(4, stride=1)
self.unconv1 = nn.ConvTranspose2d(6, 3, padding=2, kernel_size=5)
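    # forward: two conv+ReLU+max-pool stages (pool indices retained),
    # mirrored by max-unpool+ReLU+transposed-conv stages.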
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
x = self.unpool2(x, indices2)
x = F.relu(x)
x = self.unconv2(x)
x = self.unpool1(x, indices1)
x = F.relu(x)
x = self.unconv1(x)
return x
def encoder(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
return x, indices1, indices2
    def decoder(self, x, indices1, indices2):
        x = self.unpool2(x, indices2)
        x = F.relu(x)
        x = self.unconv2(x)
        x = self.unpool1(x, indices1)
        x = F.relu(x)
        x = self.unconv1(x)
        return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 6
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 89304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 61
x1 = xindex // 61 % 61
x2 = xindex // 3721
x3 = xindex % 3721
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (3 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (65 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (66 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (67 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp17 = tl.load(in_ptr0 + (129 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp19 = tl.load(in_ptr0 + (130 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp21 = tl.load(in_ptr0 + (131 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp23 = tl.load(in_ptr0 + (192 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp25 = tl.load(in_ptr0 + (193 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp27 = tl.load(in_ptr0 + (194 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp29 = tl.load(in_ptr0 + (195 + x0 + 64 * x1 + 4096 * x2), xmask)
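    # Running maximum over all 16 taps of the 4x4, stride-1 pooling window.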
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1], 1, tl.int8)
tmp33 = tl.full([1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
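    # Convert the window-local argmax (0-15) into row/column offsets, then
    # into a flat index into the 64x64 input plane, as max_unpool2d expects.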
tmp77 = tl.full([1], 4, tl.int32)
tmp78 = tl.where((tmp76 < 0) != (tmp77 < 0), tl.where(tmp76 % tmp77 !=
0, tmp76 // tmp77 - 1, tmp76 // tmp77), tmp76 // tmp77)
tmp79 = tmp78 * tmp77
tmp80 = tmp76 - tmp79
tmp81 = x1
tmp82 = tmp81 + tmp78
tmp83 = x0
tmp84 = tmp83 + tmp80
tmp85 = tl.full([1], 64, tl.int64)
tmp86 = tmp82 * tmp85
tmp87 = tmp86 + tmp84
tl.store(out_ptr0 + (x3 + 3744 * x2), tmp30, xmask)
tl.store(out_ptr2 + x4, tmp87, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 16
x0 = xindex % 3721
x4 = xindex // 3721
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3744 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 60
x1 = xindex // 60 % 60
x2 = xindex // 3600
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 61 * x1 + 3744 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 61 * x1 + 3744 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (61 + x0 + 61 * x1 + 3744 * x2), xmask)
tmp12 = tl.load(in_ptr0 + (62 + x0 + 61 * x1 + 3744 * x2), xmask)
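    # 2x2, stride-1 window over the 61-wide plane: the max and its
    # window-local argmax are tracked together, then flattened with row
    # stride 61 below.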
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 2, tl.int32)
tmp18 = tl.where((tmp15 < 0) != (tmp17 < 0), tl.where(tmp15 % tmp17 !=
0, tmp15 // tmp17 - 1, tmp15 // tmp17), tmp15 // tmp17)
tmp19 = tmp18 * tmp17
tmp20 = tmp15 - tmp19
tmp21 = x1
tmp22 = tmp21 + tmp18
tmp23 = x0
tmp24 = tmp23 + tmp20
tmp25 = tl.full([1], 61, tl.int64)
tmp26 = tmp22 * tmp25
tmp27 = tmp26 + tmp24
tl.store(out_ptr0 + x3, tmp27, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 89304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_7, (6,), (1,))
assert_size_stride(primals_8, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_9, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 64, 64), (24576, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(98304)](buf1, primals_2,
98304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 61, 61), (22464, 3744, 61, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 6, 61, 61), (22326, 3721, 61, 1),
torch.int64)
triton_poi_fused_max_pool2d_with_indices_1[grid(89304)](buf1, buf2,
buf4, 89304, XBLOCK=512, num_warps=8, num_stages=1)
buf5 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 61, 61), (59536, 3721, 61, 1))
buf6 = empty_strided_cuda((4, 16, 61, 61), (59904, 3744, 61, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(238144)](buf5, primals_5,
buf6, 238144, XBLOCK=512, num_warps=8, num_stages=1)
del buf5
del primals_5
buf7 = empty_strided_cuda((4, 16, 60, 60), (57600, 3600, 60, 1),
torch.int64)
buf8 = empty_strided_cuda((4, 16, 60, 60), (57600, 3600, 60, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(230400)](buf6, buf7,
buf8, 230400, XBLOCK=512, num_warps=8, num_stages=1)
buf9 = torch.ops.aten.max_unpool2d.default(buf8, buf7, [61, 61])
del buf8
buf10 = buf9
del buf9
buf11 = buf10
del buf10
triton_poi_fused_relu_4[grid(238144)](buf11, 238144, XBLOCK=1024,
num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 6, 61, 61), (22326, 3721, 61, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_5[grid(89304)](buf13, primals_7, 89304,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf14 = torch.ops.aten.max_unpool2d.default(buf13, buf4, [64, 64])
del buf13
buf15 = buf14
del buf14
buf16 = buf15
del buf15
triton_poi_fused_relu_6[grid(98304)](buf16, 98304, XBLOCK=1024,
num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_7[grid(49152)](buf18, primals_9, 49152,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
return (buf18, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf4, buf6, buf7, buf11, buf16)
class AutoencoderNew(nn.Module):
def __init__(self):
super(AutoencoderNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, padding=2, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(4, stride=1, return_indices=True)
self.conv2 = nn.Conv2d(6, 16, padding=2, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(2, stride=1, return_indices=True)
self.unpool2 = nn.MaxUnpool2d(2, stride=1)
self.unconv2 = nn.ConvTranspose2d(16, 6, padding=2, kernel_size=5)
self.unpool1 = nn.MaxUnpool2d(4, stride=1)
self.unconv1 = nn.ConvTranspose2d(6, 3, padding=2, kernel_size=5)
def encoder(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
return x, indices1, indices2
    def decoder(self, x, indices1, indices2):
        x = self.unpool2(x, indices2)
        x = F.relu(x)
        x = self.unconv2(x)
        x = self.unpool1(x, indices1)
        x = F.relu(x)
        x = self.unconv1(x)
        return x
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.unconv2.weight
primals_7 = self.unconv2.bias
primals_8 = self.unconv1.weight
primals_9 = self.unconv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
aoxolotl/slr
|
Autoencoder
| false
| 6,259
|
[
"MIT"
] | 1
|
20a4a9036f2dc3a61745072f89b0f5bb1cc51e1b
|
https://github.com/aoxolotl/slr/tree/20a4a9036f2dc3a61745072f89b0f5bb1cc51e1b
|
Embbed2
|
import torch
import torch.nn
class Embbed2(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(Embbed2, self).__init__()
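        # Scales 2**linspace(0, m, out_features // in_features) - 1 multiply
        # identity blocks, which are then flattened into an
        # (out_features, in_features) matrix b.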
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def forward(self, x):
x = torch.matmul(x, self.b.T)
return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1
)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
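    # Fused cat: output lanes x0 < 4 emit a * sin(x @ b.T); lanes 4-7 emit
    # a * cos(x @ b.T).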
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.sin(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr0 + (-4 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.cos(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, buf0, buf1, 512,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0
class Embbed2New(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(Embbed2New, self).__init__()
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def output_size(self):
return 2 * self.osize
def forward(self, input_0):
primals_1 = self.b
primals_3 = self.a
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashwinpn/Computer-Vision
|
Embbed2
| false
| 6,260
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|
SkipModule
|
import torch
import torch.nn
class SkipModule(torch.nn.Module):
def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
super(SkipModule, self).__init__()
        # `activation` is accepted but never applied; nn.Linear's third
        # positional argument is `bias`, so keep the default bias=True.
        self.linear1 = torch.nn.Linear(in_features, out_features)
        self.linear2 = torch.nn.Linear(out_features, out_features)
        self.linear3 = torch.nn.Linear(in_features + out_features,
            out_features)
def forward(self, x):
x1 = self.linear1(x)
x1 = self.linear2(x1)
x = torch.cat((x, x1), dim=-1)
return self.linear3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, buf1, buf2, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf3)
del primals_7
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf2, (64, 8), (8, 1), 0
), primals_6, primals_4
class SkipModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
super(SkipModuleNew, self).__init__()
        # `activation` is accepted but never applied; nn.Linear's third
        # positional argument is `bias`, so keep the default bias=True.
        self.linear1 = torch.nn.Linear(in_features, out_features)
        self.linear2 = torch.nn.Linear(out_features, out_features)
        self.linear3 = torch.nn.Linear(in_features + out_features,
            out_features)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ashwinpn/Computer-Vision
|
SkipModule
| false
| 6,261
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|
BarlowTwinsLoss
|
import torch
import torch.nn as nn
class BarlowTwinsLoss(nn.Module):
def __init__(self, batch_size, lambda_coeff=0.005, z_dim=128):
super().__init__()
self.z_dim = z_dim
self.batch_size = batch_size
self.lambda_coeff = lambda_coeff
def off_diagonal_ele(self, x):
n, m = x.shape
assert n == m
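        # Dropping the last element and viewing as (n - 1, n + 1) places
        # every diagonal entry in column 0, so [:, 1:] keeps exactly the
        # off-diagonal terms.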
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, z1, z2):
z1_norm = (z1 - torch.mean(z1, dim=0)) / torch.std(z1, dim=0)
z2_norm = (z2 - torch.mean(z2, dim=0)) / torch.std(z2, dim=0)
cross_corr = torch.matmul(z1_norm.T, z2_norm) / self.batch_size
on_diag = torch.diagonal(cross_corr).add_(-1).pow_(2).sum()
off_diag = self.off_diagonal_ele(cross_corr).pow_(2).sum()
return on_diag + self.lambda_coeff * off_diag
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
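    # Per-column mean over the batch of 4, then the unbiased std (squared
    # deviations summed and divided by n - 1 = 3).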
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tmp10 / tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
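    # Stride 5 walks the diagonal of the row-major 4x4 matrix: scale by
    # 1 / batch_size (0.25) and subtract 1 in place.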
tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + 5 * x0, tmp4, xmask)
@triton.jit
def triton_per_fused_pow_sum_3(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr1 + tl.broadcast_to(5 * r0, [XBLOCK, RBLOCK]), tmp1, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None)
@triton.jit
def triton_per_fused_add_mul_sum_4(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
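    # The 12 off-diagonal entries of the 4x4 matrix: the offsets reduce to
    # 1 + 5 * (r0 // 4) + r0 % 4, skipping the stride-5 diagonal positions.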
tmp0 = tl.load(in_ptr0 + (1 + 5 * (r0 // 4) + 5 * (r0 % 4 // 4) + r0 %
4), rmask, other=0.0)
tmp6 = tl.load(in_out_ptr0 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp8 = 0.005
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mean_std_sub_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_mean_std_sub_0[grid(16)](arg1_1, buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), buf1,
out=buf2)
del buf0
buf3 = buf1
del buf1
triton_poi_fused_div_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
triton_poi_fused_add_2[grid(4)](buf2, buf3, 4, XBLOCK=4, num_warps=
1, num_stages=1)
del buf2
buf7 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_pow_sum_3[grid(1)](buf3, buf3, buf7, 1, 4, XBLOCK=
1, num_warps=2, num_stages=1)
buf9 = buf7
del buf7
triton_per_fused_add_mul_sum_4[grid(1)](buf9, buf3, 1, 12, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
return buf9,
class BarlowTwinsLossNew(nn.Module):
def __init__(self, batch_size, lambda_coeff=0.005, z_dim=128):
super().__init__()
self.z_dim = z_dim
self.batch_size = batch_size
self.lambda_coeff = lambda_coeff
def off_diagonal_ele(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ashutoshml/lightning-tutorials
|
BarlowTwinsLoss
| false
| 6,262
|
[
"Apache-2.0"
] | 1
|
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
|
L2Norm
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
        init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(1).sqrt() + self.eps
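        # Note: sum(1) lacks keepdim=True, so norm has shape (N, H, W) and
        # broadcasts as (1, N, H, W) over the input; the generated kernel
        # below reproduces this exact behavior.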
x /= norm.expand_as(x)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x
) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
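    # Sum of squares over the four stride-16 taps, sqrt plus eps, divide,
    # then scale by the per-channel weight.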
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp15
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp17, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
        init.constant_(self.weight, self.gamma)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
ashwath007/amenity-detection
|
L2Norm
| false
| 6,263
|
[
"Apache-2.0"
] | 1
|
acb885eb4d791acc6e65237445a4fc6830e4d30c
|
https://github.com/ashwath007/amenity-detection/tree/acb885eb4d791acc6e65237445a4fc6830e4d30c
|
SirenModule
|
import math
import torch
import torch.nn
class SirenModule(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SirenModule, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features)
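        # SIREN-style init: uniform in +/- sqrt(6 / fan_in), scaled by
        # weight_multiplier.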
init_bounds = math.sqrt(6 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, x):
return torch.sin(self.linear(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.sin(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class SirenModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SirenModuleNew, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features)
init_bounds = math.sqrt(6 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ashwinpn/Computer-Vision
|
SirenModule
| false
| 6,264
|
[
"MIT"
] | 1
|
9dc3abfe416385171b76e2bad6872e10f36a12b4
|
https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4
|