| entry_point (string, 1-65) | original_triton_python_code (string, 208-619k) | optimised_triton_code (string, 1.15k-275k) | repo_name (string, 7-115) | module_name (string, 1-65) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6) | stars (int64, 0-19.8k) | sha (string, 40) | repo_link (string, 72-180) |
|---|---|---|---|---|---|---|---|---|---|---|
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action, phi=0.05):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)
self.max_action = max_action
self.phi = phi
def forward(self, state, action):
a = F.relu(self.l1(torch.cat([state, action], 1)))
a = F.relu(self.l2(a))
a = self.phi * self.max_action * torch.tanh(self.l3(a))
return (a + action).clamp(-self.max_action, self.max_action)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 300
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_clamp_ge_le_logical_and_mul_tanh_3(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 0.2
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp6 = -4.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 4.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp5 >= tmp6
tmp11 = tmp5 <= tmp8
tmp12 = tmp10 & tmp11
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (4, 300), (300, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), (
1, 400), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_clamp_ge_le_logical_and_mul_tanh_3[grid(16)](buf5,
primals_2, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, buf0, buf2, buf4, buf5, buf7, primals_7, primals_5
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action, phi=0.05):
super(ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)
self.max_action = max_action
self.phi = phi
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
DanielTakeshi/DCUR
|
Actor
| false
| 355
|
[
"MIT"
] | 0
|
1cdb00e7e68060ad3bba9a497106c327f6b5a663
|
https://github.com/DanielTakeshi/DCUR/tree/1cdb00e7e68060ad3bba9a497106c327f6b5a663
|
SelfAttention
|
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
class SelfAttention(nn.Module):
def __init__(self, num_channels: 'int', num_heads: 'int', dropout: 'float'
):
super().__init__()
self.norm = nn.LayerNorm(num_channels)
self.attention = MultiHeadAttention(num_q_channels=num_channels,
num_kv_channels=num_channels, num_heads=num_heads, dropout=dropout)
def forward(self, x, pad_mask=None, attn_mask=None):
x = self.norm(x)
return self.attention(x, x, pad_mask=pad_mask, attn_mask=attn_mask)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4, 'num_heads': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
return buf12, primals_3, buf2, buf9, reinterpret_tensor(buf11, (4, 4),
(4, 1), 0), primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0
), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)
class MultiHeadAttention(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
class SelfAttentionNew(nn.Module):
def __init__(self, num_channels: 'int', num_heads: 'int', dropout: 'float'
):
super().__init__()
self.norm = nn.LayerNorm(num_channels)
self.attention = MultiHeadAttention(num_q_channels=num_channels,
num_kv_channels=num_channels, num_heads=num_heads, dropout=dropout)
def forward(self, input_0):
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_4 = self.attention.attention.in_proj_weight
primals_5 = self.attention.attention.in_proj_bias
primals_3 = self.attention.attention.out_proj.weight
primals_7 = self.attention.attention.out_proj.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
DartingMelody/perceiver-io
|
SelfAttention
| false
| 356
|
[
"Apache-2.0"
] | 0
|
fb818b1763f61e259b23b8b014df2ac01c303a54
|
https://github.com/DartingMelody/perceiver-io/tree/fb818b1763f61e259b23b8b014df2ac01c303a54
|
SpaceToDepth
|
import torch
from torch import nn
class SpaceToDepth(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
x = x.view(N, C * self.bs ** 2, H // self.bs, W // self.bs)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'block_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 1, 1), 0),
class SpaceToDepthNew(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dauriel/weather4cast2021
|
SpaceToDepth
| false
| 357
|
[
"Apache-2.0"
] | 0
|
29e818c4bcd488ec84b51558bf5392e4a887db70
|
https://github.com/Dauriel/weather4cast2021/tree/29e818c4bcd488ec84b51558bf5392e4a887db70
|
CrossAttention
|
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
class CrossAttention(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.q_norm = nn.LayerNorm(num_q_channels)
self.kv_norm = nn.LayerNorm(num_kv_channels)
self.attention = MultiHeadAttention(num_q_channels=num_q_channels,
num_kv_channels=num_kv_channels, num_heads=num_heads, dropout=
dropout)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
x_q = self.q_norm(x_q)
x_kv = self.kv_norm(x_kv)
return self.attention(x_q, x_kv, pad_mask=pad_mask, attn_mask=attn_mask
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_q_channels': 4, 'num_kv_channels': 4, 'num_heads': 4,
'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_6, buf2, buf3,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0,
buf1, primals_1, primals_2, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4
), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_6, buf2,
buf3, primals_4, primals_5, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf2
del buf3
del primals_4
del primals_5
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 4),
buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 8),
buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf8)
buf9 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0)
del buf5
triton_poi_fused_mul_2[grid(16)](buf9, primals_8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf7, (4, 1, 4), (1, 1,
4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = buf10
del buf10
triton_poi_fused__softmax_4[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf11
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf12, reinterpret_tensor(buf8, (4, 4, 1), (1, 4,
1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf13, buf14, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0)
del buf13
extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_10
return buf15, primals_3, primals_6, buf4, buf6, buf12, reinterpret_tensor(
buf14, (4, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf8, (4,
1, 4), (1, 1, 4), 0), reinterpret_tensor(buf9, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 1), 0
), reinterpret_tensor(primals_7, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_7, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_7, (4, 4), (4, 1), 0)
class MultiHeadAttention(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
class CrossAttentionNew(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.q_norm = nn.LayerNorm(num_q_channels)
self.kv_norm = nn.LayerNorm(num_kv_channels)
self.attention = MultiHeadAttention(num_q_channels=num_q_channels,
num_kv_channels=num_kv_channels, num_heads=num_heads, dropout=
dropout)
def forward(self, input_0, input_1):
primals_1 = self.q_norm.weight
primals_2 = self.q_norm.bias
primals_4 = self.kv_norm.weight
primals_5 = self.kv_norm.bias
primals_7 = self.attention.attention.in_proj_weight
primals_8 = self.attention.attention.in_proj_bias
primals_3 = self.attention.attention.out_proj.weight
primals_10 = self.attention.attention.out_proj.bias
primals_6 = input_0
primals_9 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
DartingMelody/perceiver-io
|
CrossAttention
| false
| 358
|
[
"Apache-2.0"
] | 0
|
fb818b1763f61e259b23b8b014df2ac01c303a54
|
https://github.com/DartingMelody/perceiver-io/tree/fb818b1763f61e259b23b8b014df2ac01c303a54
|
Scale
|
import torch
import torch.nn as nn
class Scale(nn.Module):
def __init__(self, nchannels, bias=True, init_scale=1.0):
super().__init__()
self.nchannels = nchannels
self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
if bias:
self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
else:
self.register_parameter('bias', None)
self.reset_parameters(init_scale)
def reset_parameters(self, init_scale=1.0):
self.weight.data.fill_(init_scale)
if self.bias is not None:
self.bias.data.fill_(0.0)
def forward(self, x):
y = x * self.weight
if self.bias is not None:
y += self.bias
return y
def __repr__(self):
s = '{} ({}, {})'
return s.format(self.__class__.__name__, self.nchannels, self.bias
is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nchannels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class ScaleNew(nn.Module):
def __init__(self, nchannels, bias=True, init_scale=1.0):
super().__init__()
self.nchannels = nchannels
self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
if bias:
self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1))
else:
self.register_parameter('bias', None)
self.reset_parameters(init_scale)
def reset_parameters(self, init_scale=1.0):
self.weight.data.fill_(init_scale)
if self.bias is not None:
self.bias.data.fill_(0.0)
def __repr__(self):
s = '{} ({}, {})'
return s.format(self.__class__.__name__, self.nchannels, self.bias
is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
CuongNguyen218/ObjectDetection-OneStageDet
|
Scale
| false
| 359
|
[
"MIT"
] | 0
|
60efe8b0ee6782b2aea20a32264b2ce1fc21901f
|
https://github.com/CuongNguyen218/ObjectDetection-OneStageDet/tree/60efe8b0ee6782b2aea20a32264b2ce1fc21901f
|
Upsample_
|
import torch
from torch import nn
class Upsample_(nn.Module):
def __init__(self, scale=2):
super(Upsample_, self).__init__()
self.upsample = nn.Upsample(mode='bilinear', scale_factor=scale)
def forward(self, x):
return self.upsample(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf2, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf2,
class Upsample_New(nn.Module):
def __init__(self, scale=2):
super(Upsample_New, self).__init__()
self.upsample = nn.Upsample(mode='bilinear', scale_factor=scale)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Dauriel/weather4cast2021
|
Upsample_
| false
| 360
|
[
"Apache-2.0"
] | 0
|
29e818c4bcd488ec84b51558bf5392e4a887db70
|
https://github.com/Dauriel/weather4cast2021/tree/29e818c4bcd488ec84b51558bf5392e4a887db70
|
SubPixelConvolutionalBlock
|
import torch
from torch import nn
class SubPixelConvolutionalBlock(nn.Module):
"""
A subpixel convolutional block, comprising convolutional, pixel-shuffle, and PReLU activation layers.
"""
def __init__(self, kernel_size=3, n_channels=64, scaling_factor=2):
"""
:param kernel_size: kernel size of the convolution
:param n_channels: number of input and output channels
:param scaling_factor: factor to scale input images by (along both dimensions)
"""
super(SubPixelConvolutionalBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=n_channels, out_channels=
n_channels * scaling_factor ** 2, kernel_size=kernel_size,
padding=kernel_size // 2)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor=scaling_factor)
self.prelu = nn.PReLU()
def forward(self, input):
"""
Forward propagation.
:param input: input images, a tensor of size (N, n_channels, w, h)
:return: scaled output images, a tensor of size (N, n_channels, w * scaling factor, h * scaling factor)
"""
output = self.conv(input)
output = self.pixel_shuffle(output)
output = self.prelu(output)
return output
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused__prelu_kernel_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 128
x2 = xindex // 16384 % 64
x3 = xindex // 1048576
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * (x1 % 2) + 4 * x2 + 256 * (x0 // 2) +
16384 * (x1 // 2) + 1048576 * x3 + x0 % 2), None)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x4, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16384, 9)](primals_1, buf0, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_1[grid(256, 4096)](primals_3, buf1, 256, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(4194304)](buf3, primals_2,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((4, 64, 128, 128), (1048576, 16384, 128,
1), torch.float32)
triton_poi_fused__prelu_kernel_3[grid(4194304)](buf3, primals_4,
buf4, 4194304, XBLOCK=512, num_warps=8, num_stages=1)
return buf4, buf0, buf1, primals_4, buf3
class SubPixelConvolutionalBlockNew(nn.Module):
"""
A subpixel convolutional block, comprising convolutional, pixel-shuffle, and PReLU activation layers.
"""
def __init__(self, kernel_size=3, n_channels=64, scaling_factor=2):
"""
:param kernel_size: kernel size of the convolution
:param n_channels: number of input and output channels
:param scaling_factor: factor to scale input images by (along both dimensions)
"""
super(SubPixelConvolutionalBlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels=n_channels, out_channels=
n_channels * scaling_factor ** 2, kernel_size=kernel_size,
padding=kernel_size // 2)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor=scaling_factor)
self.prelu = nn.PReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.prelu.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
DanielLiang1/a-PyTorch-Tutorial-to-Super-Resolution
|
SubPixelConvolutionalBlock
| false
| 361
|
[
"MIT"
] | 0
|
cf7b519029687fe9726bb194fe3765934afa18b3
|
https://github.com/DanielLiang1/a-PyTorch-Tutorial-to-Super-Resolution/tree/cf7b519029687fe9726bb194fe3765934afa18b3
|
DoubleDense
|
import torch
from torch import nn
from torch.nn import functional as F
class DoubleDense(nn.Module):
def __init__(self, in_channels, hidden_neurons, output_channels):
super(DoubleDense, self).__init__()
self.dense1 = nn.Linear(in_channels, out_features=hidden_neurons)
self.dense2 = nn.Linear(in_features=hidden_neurons, out_features=
hidden_neurons // 2)
self.dense3 = nn.Linear(in_features=hidden_neurons // 2,
out_features=output_channels)
def forward(self, x):
out = F.relu(self.dense1(x.view(x.size(0), -1)))
out = F.relu(self.dense2(out))
out = self.dense3(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'hidden_neurons': 4, 'output_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (2, 4), (4, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (4, 2), (2, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 2), (1, 4
), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(8)](buf3, primals_5, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class DoubleDenseNew(nn.Module):
def __init__(self, in_channels, hidden_neurons, output_channels):
super(DoubleDenseNew, self).__init__()
self.dense1 = nn.Linear(in_channels, out_features=hidden_neurons)
self.dense2 = nn.Linear(in_features=hidden_neurons, out_features=
hidden_neurons // 2)
self.dense3 = nn.Linear(in_features=hidden_neurons // 2,
out_features=output_channels)
def forward(self, input_0):
primals_1 = self.dense1.weight
primals_3 = self.dense1.bias
primals_4 = self.dense2.weight
primals_5 = self.dense2.bias
primals_6 = self.dense3.weight
primals_7 = self.dense3.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Dauriel/weather4cast2021
|
DoubleDense
| false
| 362
|
[
"Apache-2.0"
] | 0
|
29e818c4bcd488ec84b51558bf5392e4a887db70
|
https://github.com/Dauriel/weather4cast2021/tree/29e818c4bcd488ec84b51558bf5392e4a887db70
|
BasicDeconv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicDeconv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super(BasicDeconv, self).__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.tconv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(784)](buf1,
primals_2, buf2, 784, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class BasicDeconvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super(BasicDeconvNew, self).__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, input_0):
primals_1 = self.tconv.weight
primals_2 = self.tconv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
DecAngel/C-3-Framework
|
BasicDeconv
| false
| 363
|
[
"MIT"
] | 0
|
00b792b4481a0ec9d7e30e290c66e7020235e79b
|
https://github.com/DecAngel/C-3-Framework/tree/00b792b4481a0ec9d7e30e290c66e7020235e79b
|
LinearNet
|
import torch
import torch.nn
import torch.optim
class LinearNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.nonlinear = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x: 'torch.Tensor'):
x = x.requires_grad_(True)
x = torch.nn.functional.normalize(x)
x = self.linear1(x)
x = self.nonlinear(x)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'H': 4, 'D_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), primals_4, buf4, primals_2
class LinearNetNew(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.nonlinear = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
DavidV17/ReAgent
|
LinearNet
| false
| 364
|
[
"BSD-3-Clause"
] | 0
|
9b55696849cf133bc8494a5bce57df420ffe7517
|
https://github.com/DavidV17/ReAgent/tree/9b55696849cf133bc8494a5bce57df420ffe7517
|
AngleLoss
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from typing import *
class AngleLoss(nn.Module):
def __init__(self):
super(AngleLoss, self).__init__()
def forward(self, angle, angle_hat):
return torch.exp(F.mse_loss(angle_hat.float(), angle.float())) - 1
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_exp_mse_loss_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = 1.0
tmp11 = tmp9 - tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_exp_mse_loss_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AngleLossNew(nn.Module):
def __init__(self):
super(AngleLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
DeVriesMatt/pointMLP-pytorch
|
AngleLoss
| false
| 365
|
[
"Apache-2.0"
] | 0
|
e9c09a2038551e83b072353f3fd7e3294463e892
|
https://github.com/DeVriesMatt/pointMLP-pytorch/tree/e9c09a2038551e83b072353f3fd7e3294463e892
|
VAE
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class VAE(nn.Module):
def __init__(self, state_dim, action_dim, latent_dim, max_action):
super(VAE, self).__init__()
self.e1 = nn.Linear(state_dim + action_dim, 750)
self.e2 = nn.Linear(750, 750)
self.mean = nn.Linear(750, latent_dim)
self.log_std = nn.Linear(750, latent_dim)
self.d1 = nn.Linear(state_dim + latent_dim, 750)
self.d2 = nn.Linear(750, 750)
self.d3 = nn.Linear(750, action_dim)
self.max_action = max_action
self.latent_dim = latent_dim
def forward(self, state, action):
z = F.relu(self.e1(torch.cat([state, action], 1)))
z = F.relu(self.e2(z))
mean = self.mean(z)
log_std = self.log_std(z).clamp(-4, 15)
std = torch.exp(log_std)
z = mean + std * torch.randn_like(std)
u = self.decode(state, z)
return u, mean, std
def decode(self, state, z=None):
if z is None:
z = torch.randn((state.shape[0], self.latent_dim)).clamp(-0.5, 0.5)
a = F.relu(self.d1(torch.cat([state, z], 1)))
a = F.relu(self.d2(a))
return self.max_action * torch.tanh(self.d3(a))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'latent_dim': 4,
'max_action': 4}]
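# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test sketching the encode/reparameterize/decode round trip,
# reusing the get_init_inputs()/get_inputs() helpers defined above (CPU).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    vae = VAE(*init_args, **init_kwargs)
    state, action = get_inputs()
    u, mean, std = vae(state, action)
    # u is the reconstructed action, bounded by max_action via tanh scaling.
    print(u.shape, mean.shape, std.shape)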
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 3000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 750
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_clamp_exp_ge_le_logical_and_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -4.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 15.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 >= tmp3
tmp9 = tmp2 <= tmp5
tmp10 = tmp8 & tmp9
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr3 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (750, 8), (8, 1))
assert_size_stride(primals_4, (750,), (1,))
assert_size_stride(primals_5, (750, 750), (750, 1))
assert_size_stride(primals_6, (750,), (1,))
assert_size_stride(primals_7, (4, 750), (750, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 750), (750, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (750, 8), (8, 1))
assert_size_stride(primals_12, (750,), (1,))
assert_size_stride(primals_13, (750, 750), (750, 1))
assert_size_stride(primals_14, (750,), (1,))
assert_size_stride(primals_15, (4, 750), (750, 1))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 750), (750, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 750), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(3000)](buf2, primals_4, 3000, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 750), (750, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (750, 750), (
1, 750), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(3000)](buf4, primals_6, 3000, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(750, 4), (1, 750), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_9, (750, 4), (1,
750), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_clamp_exp_ge_le_logical_and_2[grid(16)](buf6,
primals_10, buf7, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_10
buf8 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_3[grid(32)](primals_1, buf5, buf7, buf9, buf10,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf11 = empty_strided_cuda((4, 750), (750, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (8, 750), (
1, 8), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_1[grid(3000)](buf12, primals_12, 3000, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_12
buf13 = empty_strided_cuda((4, 750), (750, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (750, 750),
(1, 750), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_relu_1[grid(3000)](buf14, primals_14, 3000, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_14
buf15 = buf6
del buf6
extern_kernels.addmm(primals_16, buf14, reinterpret_tensor(
primals_15, (750, 4), (1, 750), 0), alpha=1, beta=1, out=buf15)
del primals_16
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_tanh_4[grid(16)](buf15, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return (buf16, buf5, buf7, buf0, buf2, buf4, buf7, buf9, buf10, buf12,
buf14, buf15, primals_15, primals_13, primals_11, buf17, primals_9,
primals_7, primals_5)
class VAENew(nn.Module):
def __init__(self, state_dim, action_dim, latent_dim, max_action):
super(VAENew, self).__init__()
self.e1 = nn.Linear(state_dim + action_dim, 750)
self.e2 = nn.Linear(750, 750)
self.mean = nn.Linear(750, latent_dim)
self.log_std = nn.Linear(750, latent_dim)
self.d1 = nn.Linear(state_dim + latent_dim, 750)
self.d2 = nn.Linear(750, 750)
self.d3 = nn.Linear(750, action_dim)
self.max_action = max_action
self.latent_dim = latent_dim
def decode(self, state, z=None):
if z is None:
z = torch.randn((state.shape[0], self.latent_dim)).clamp(-0.5, 0.5)
a = F.relu(self.d1(torch.cat([state, z], 1)))
a = F.relu(self.d2(a))
return self.max_action * torch.tanh(self.d3(a))
def forward(self, input_0, input_1):
primals_3 = self.e1.weight
primals_4 = self.e1.bias
primals_5 = self.e2.weight
primals_6 = self.e2.bias
primals_7 = self.mean.weight
primals_8 = self.mean.bias
primals_9 = self.log_std.weight
primals_10 = self.log_std.bias
primals_11 = self.d1.weight
primals_12 = self.d1.bias
primals_13 = self.d2.weight
primals_14 = self.d2.bias
primals_15 = self.d3.weight
primals_16 = self.d3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0], output[1], output[2]
|
DanielTakeshi/DCUR
|
VAE
| false
| 366
|
[
"MIT"
] | 0
|
1cdb00e7e68060ad3bba9a497106c327f6b5a663
|
https://github.com/DanielTakeshi/DCUR/tree/1cdb00e7e68060ad3bba9a497106c327f6b5a663
|
Upsample
|
import torch
from torch import nn
class Upsample(nn.Module):
def __init__(self, input_dim, output_dim, kernel, stride):
super(Upsample, self).__init__()
self.upsample = nn.ConvTranspose2d(input_dim, output_dim,
kernel_size=kernel, stride=stride)
def forward(self, x):
return self.upsample(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'kernel': 4, 'stride': 1}]
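# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test: a 4x4 transposed convolution at stride 1 maps a 4x4
# spatial input to (4 - 1) * 1 + 4 = 7, so the output is (4, 4, 7, 7).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    up = Upsample(*init_args, **init_kwargs)
    out = up(*get_inputs())
    print(out.shape)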
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(784)](buf1, primals_2, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class UpsampleNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel, stride):
super(UpsampleNew, self).__init__()
self.upsample = nn.ConvTranspose2d(input_dim, output_dim,
kernel_size=kernel, stride=stride)
def forward(self, input_0):
primals_1 = self.upsample.weight
primals_2 = self.upsample.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Dauriel/weather4cast2021
|
Upsample
| false
| 367
|
[
"Apache-2.0"
] | 0
|
29e818c4bcd488ec84b51558bf5392e4a887db70
|
https://github.com/Dauriel/weather4cast2021/tree/29e818c4bcd488ec84b51558bf5392e4a887db70
|
DECLoss
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class DECLoss(nn.Module):
def __init__(self):
super(DECLoss, self).__init__()
def target_distribution(self, q):
weight = q ** 2 / q.sum(0)
return Variable((weight.t() / weight.sum(1)).t().data,
requires_grad=True)
def KL_div(self, q, p):
res = torch.sum(p * torch.log(p / q))
return res
def forward(self, q):
p = self.target_distribution(q)
return self.KL_div(q, p)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
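# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test: build the sharpened target distribution p from q and
# return KL(p || q) as a scalar. Assumes strictly positive q, as produced by
# torch.rand in get_inputs() above.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    criterion = DECLoss(*init_args, **init_kwargs)
    loss = criterion(*get_inputs())
    print(loss.item())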
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tmp1 / tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_div_sum_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x1 + 4 * y0), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_t_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_pow_sum_0[grid(16)](arg0_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_sum_1[grid(4, 4)](buf0, buf1, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_div_sum_t_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
del buf1
return buf2,
class DECLossNew(nn.Module):
def __init__(self):
super(DECLossNew, self).__init__()
def target_distribution(self, q):
weight = q ** 2 / q.sum(0)
return Variable((weight.t() / weight.sum(1)).t().data,
requires_grad=True)
def KL_div(self, q, p):
res = torch.sum(p * torch.log(p / q))
return res
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Crazy-Jack/SpatialExpGeneCluster
|
DECLoss
| false
| 368
|
[
"MIT"
] | 0
|
9e57c308d1c577a936a2358d0641c65b8130034f
|
https://github.com/Crazy-Jack/SpatialExpGeneCluster/tree/9e57c308d1c577a936a2358d0641c65b8130034f
|
SAModule_Head
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False, **kwargs):
super(BasicConv, self).__init__()
self.use_bn = use_bn
self.conv = nn.Conv2d(in_channels, out_channels, bias=not self.
use_bn, **kwargs)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
class SAModule_Head(nn.Module):
def __init__(self, in_channels, out_channels, use_bn):
super(SAModule_Head, self).__init__()
branch_out = out_channels // 4
self.branch1x1 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=1)
self.branch3x3 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=3, padding=1)
self.branch5x5 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=5, padding=2)
self.branch7x7 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=7, padding=3)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3(x)
branch5x5 = self.branch5x5(x)
branch7x7 = self.branch7x7(x)
out = torch.cat([branch1x1, branch3x3, branch5x5, branch7x7], 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'use_bn': 4}]
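# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test: four parallel conv branches (1x1/3x3/5x5/7x7), each with
# out_channels // 4 = 1 output channel here, are concatenated back to 4
# channels, so a (4, 4, 4, 4) input yields a (4, 4, 4, 4) output.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    head = SAModule_Head(*init_args, **init_kwargs)
    out = head(*get_inputs())
    print(out.shape)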
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_repeat_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 1])
tmp4 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp4 - tmp14
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp28 + tmp3
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp25, xmask)
tl.store(out_ptr3 + (4 * r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_repeat_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 1])
tmp4 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp4 - tmp14
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp28 + tmp3
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp25, xmask)
tl.store(out_ptr3 + (4 * r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (1, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_12, (1,), (1,))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf6 = reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf4
buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf28 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 0)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_cat_repeat_0[grid(4)](buf6,
primals_3, primals_4, buf0, buf1, buf2, buf3, buf28, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
del primals_4
buf7 = extern_kernels.convolution(primals_2, primals_5, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
buf8 = empty_strided_cuda((4,), (1,), torch.float32)
buf9 = empty_strided_cuda((4,), (1,), torch.float32)
buf10 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf11 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf13 = reinterpret_tensor(buf11, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf11
buf29 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 1)
triton_per_fused__native_batch_norm_legit_cat_repeat_1[grid(4)](buf13,
primals_6, primals_7, buf7, buf8, buf9, buf10, buf29, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_6
del primals_7
buf14 = extern_kernels.convolution(primals_2, primals_8, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 1, 4, 4), (16, 16, 4, 1))
buf15 = empty_strided_cuda((4,), (1,), torch.float32)
buf16 = empty_strided_cuda((4,), (1,), torch.float32)
buf17 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf18 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf20 = reinterpret_tensor(buf18, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf18
buf30 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 2)
triton_per_fused__native_batch_norm_legit_cat_repeat_1[grid(4)](buf20,
primals_9, primals_10, buf14, buf15, buf16, buf17, buf30, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_10
del primals_9
buf21 = extern_kernels.convolution(primals_2, primals_11, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 1, 4, 4), (16, 16, 4, 1))
buf22 = empty_strided_cuda((4,), (1,), torch.float32)
buf23 = empty_strided_cuda((4,), (1,), torch.float32)
buf24 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf25 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf27 = reinterpret_tensor(buf25, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf25
buf31 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 3)
triton_per_fused__native_batch_norm_legit_cat_repeat_1[grid(4)](buf27,
primals_12, primals_13, buf21, buf22, buf23, buf24, buf31, 4,
16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_12
del primals_13
buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_2[grid(16, 16)](buf32, buf33, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del buf28
del buf29
del buf30
del buf31
del buf32
return (buf33, primals_1, primals_2, primals_5, primals_8, primals_11,
buf0, buf1, buf2, buf3, buf6, buf7, buf8, buf9, buf10, buf13, buf14,
buf15, buf16, buf17, buf20, buf21, buf22, buf23, buf24, buf27)
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False, **kwargs):
super(BasicConv, self).__init__()
self.use_bn = use_bn
self.conv = nn.Conv2d(in_channels, out_channels, bias=not self.
use_bn, **kwargs)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
class SAModule_HeadNew(nn.Module):
def __init__(self, in_channels, out_channels, use_bn):
super(SAModule_HeadNew, self).__init__()
branch_out = out_channels // 4
self.branch1x1 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=1)
self.branch3x3 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=3, padding=1)
self.branch5x5 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=5, padding=2)
self.branch7x7 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=7, padding=3)
def forward(self, input_0):
primals_1 = self.branch1x1.conv.weight
primals_3 = self.branch1x1.bn.weight
primals_4 = self.branch1x1.bn.bias
primals_5 = self.branch3x3.conv.weight
primals_6 = self.branch3x3.bn.weight
primals_7 = self.branch3x3.bn.bias
primals_8 = self.branch5x5.conv.weight
primals_9 = self.branch5x5.bn.weight
primals_10 = self.branch5x5.bn.bias
primals_11 = self.branch7x7.conv.weight
primals_12 = self.branch7x7.bn.weight
primals_13 = self.branch7x7.bn.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
DecAngel/C-3-Framework
|
SAModule_Head
| false
| 369
|
[
"MIT"
] | 0
|
00b792b4481a0ec9d7e30e290c66e7020235e79b
|
https://github.com/DecAngel/C-3-Framework/tree/00b792b4481a0ec9d7e30e290c66e7020235e79b
|
TripletLoss
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *
def _batch_hard(mat_distance, mat_similarity, indice=False):
sorted_mat_distance, positive_indices = torch.sort(mat_distance + -
9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
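    # NOTE (editorial): the positional addmm_(beta, alpha, mat1, mat2) overload used
    # below is deprecated; recent PyTorch expects dist.addmm_(x, y.t(), beta=1, alpha=-2).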
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
class TripletLoss(nn.Module):
"""
    Compute the triplet loss with batch-hard example mining.
    Details can be found in 'In Defense of the Triplet Loss for Person Re-Identification'.
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, emb, label):
if self.normalize_feature:
emb = F.normalize(emb)
mat_dist = euclidean_dist(emb, emb)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
assert dist_an.size(0) == dist_ap.size(0)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1.0 / y.size(0)
return loss, prec
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp29 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp11 + tmp22
tmp24 = tmp0 + tmp23
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = libdevice.sqrt(tmp26)
tmp30 = tmp28 == tmp29
tmp31 = tmp30.to(tl.float32)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = -9999999.0
tmp35 = tmp33 * tmp34
tmp36 = tmp27 + tmp35
tmp37 = r1
tmp38 = tmp37.to(tl.int16)
tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41, _tmp42 = triton_helpers.sort_with_index(tmp39, tmp40, None, 1,
stable=False, descending=True)
tmp43 = 9999999.0
tmp44 = tmp31 * tmp43
tmp45 = tmp27 + tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp47, _tmp48 = triton_helpers.sort_with_index(tmp46, tmp40, None, 1,
stable=False, descending=False)
tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp24, xmask)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp41, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp47, xmask)
@triton.jit
def triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = -1.0
tmp4 = tmp3 * tmp2
tmp5 = 4.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp0 > tmp1
tmp13 = tmp12.to(tl.int64)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp16.to(tl.float32)
tmp18 = 1.0
tmp19 = tmp17 * tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp22 = tmp11 / tmp5
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0[grid(4)](
buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf9 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6
del buf6
triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1[grid(1)](
buf8, buf4, buf2, buf9, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
return buf8, buf9
def _batch_hard(mat_distance, mat_similarity, indice=False):
sorted_mat_distance, positive_indices = torch.sort(mat_distance + -
9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
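    # NOTE (editorial): as in the reference code above, the positional
    # addmm_(beta, alpha, mat1, mat2) overload used below is deprecated;
    # recent PyTorch expects dist.addmm_(x, y.t(), beta=1, alpha=-2).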
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
class TripletLossNew(nn.Module):
"""
    Compute the triplet loss with batch-hard example mining.
    Details can be found in 'In Defense of the Triplet Loss for Person Re-Identification'.
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLossNew, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
Dingyuan-Zheng/ctf-UDA
|
TripletLoss
| false
| 370
|
[
"MIT"
] | 0
|
3e3c67f68d7eb0b52a16a259e5a77e153062c4fd
|
https://github.com/Dingyuan-Zheng/ctf-UDA/tree/3e3c67f68d7eb0b52a16a259e5a77e153062c4fd
|
MultiHeadAttention
|
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_q_channels': 4, 'num_kv_channels': 4, 'num_heads': 4,
'dropout': 0.5}]
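# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test; assumes a PyTorch version (>= 1.11) whose
# nn.MultiheadAttention accepts unbatched 2-D (seq_len, embed_dim) inputs.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    mha = MultiHeadAttention(*init_args, **init_kwargs)
    mha.eval()  # disable dropout so the smoke test is deterministic
    x_q, x_kv = get_inputs()
    out = mha(x_q, x_kv)
    print(out.shape)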
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 4),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 8),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_6, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_6
return buf9, primals_1, primals_2, buf6, reinterpret_tensor(buf8, (4, 4
), (4, 1), 0), primals_5, reinterpret_tensor(buf2, (4, 1, 4), (1, 1,
4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, input_0, input_1):
primals_3 = self.attention.in_proj_weight
primals_4 = self.attention.in_proj_bias
primals_1 = self.attention.out_proj.weight
primals_6 = self.attention.out_proj.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
DartingMelody/perceiver-io
|
MultiHeadAttention
| false
| 371
|
[
"Apache-2.0"
] | 0
|
fb818b1763f61e259b23b8b014df2ac01c303a54
|
https://github.com/DartingMelody/perceiver-io/tree/fb818b1763f61e259b23b8b014df2ac01c303a54
|
AUGLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class AUGLoss(nn.Module):
def __init__(self):
super(AUGLoss, self).__init__()
def forward(self, x1, x2):
b = x1 - x2
b = b * b
b = b.sum(1)
b = torch.sqrt(b)
return b.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
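# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test: per-position Euclidean distance along dim 1, summed to a
# scalar, reusing the get_inputs() helper above (CPU).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    criterion = AUGLoss(*init_args, **init_kwargs)
    loss = criterion(*get_inputs())
    print(loss.item())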
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sqrt_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AUGLossNew(nn.Module):
def __init__(self):
super(AUGLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
DonghyunAhn/sadvirus
|
AUGLoss
| false
| 372
|
[
"MIT"
] | 0
|
cdcc98812d613962a7003ff0c6013d0805bde024
|
https://github.com/DonghyunAhn/sadvirus/tree/cdcc98812d613962a7003ff0c6013d0805bde024
|
Conv2d_fw
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class Conv2d_fw(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, bias=True):
super(Conv2d_fw, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, bias=bias)
self.weight.fast = None
if self.bias is not None:
self.bias.fast = None
def forward(self, x):
if self.bias is None:
if self.weight.fast is not None:
out = F.conv2d(x, self.weight.fast, None, stride=self.
stride, padding=self.padding)
else:
out = super(Conv2d_fw, self).forward(x)
elif self.weight.fast is not None and self.bias.fast is not None:
out = F.conv2d(x, self.weight.fast, self.bias.fast, stride=self
.stride, padding=self.padding)
else:
out = super(Conv2d_fw, self).forward(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
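# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test. The .fast attributes hold optional "fast weights" (e.g.
# for MAML-style inner-loop updates); they default to None, so this call takes
# the plain nn.Conv2d path. A 4x4 kernel over a 4x4 input gives (4, 4, 1, 1).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    conv = Conv2d_fw(*init_args, **init_kwargs)
    out = conv(*get_inputs())
    print(out.shape)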
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf1, primals_2, primals_3
class Conv2d_fwNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, bias=True):
super(Conv2d_fwNew, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, bias=bias)
self.weight.fast = None
if self.bias is not None:
self.bias.fast = None
def forward(self, input_0):
primals_2 = self.weight
primals_1 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
DingYuan0118/Meta-Fine-Tuning
|
Conv2d_fw
| false
| 373
|
[
"MIT"
] | 0
|
531b7418420c072844216ec5217f1f03f6419a79
|
https://github.com/DingYuan0118/Meta-Fine-Tuning/tree/531b7418420c072844216ec5217f1f03f6419a79
|
ContrastiveDistanceLoss
|
import torch
import torch.nn as nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ContrastiveDistanceLoss(nn.Module):
"""The Contrastive distance loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, distance_pred, distance_true) ->torch.Tensor:
"""Forward propagation method for the contrastive loss.
Args:
distance_pred: predicted distances
distance_true: true distances
Returns:
torch.Tensor: loss
"""
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(distance_pred, 2
) + distance_true * torch.pow(margin_distance, 2)
if self.reduction == 'mean':
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == 'sum':
loss = torch.sum(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
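# --- Editorial example (not part of the original repository source) ---
# Minimal smoke test: with the default reduction='mean', the elementwise loss
# is summed, halved, and divided by the batch size, giving a scalar.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    criterion = ContrastiveDistanceLoss(*init_args, **init_kwargs)
    loss = criterion(*get_inputs())
    print(loss.item())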
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = tmp1 - tmp3
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8 * tmp8
tmp10 = tmp0 * tmp9
tmp11 = tmp5 + tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ContrastiveDistanceLossNew(nn.Module):
"""The Contrastive distance loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Dokholyan/catalyst
|
ContrastiveDistanceLoss
| false
| 374
|
[
"Apache-2.0"
] | 0
|
de8e681676d76741fdb722d4cd77274ba616915d
|
https://github.com/Dokholyan/catalyst/tree/de8e681676d76741fdb722d4cd77274ba616915d
|
ContrastiveLoss
|
import torch
class ContrastiveLoss(torch.nn.Module):
def __init__(self, margin=1.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative):
distance_anchor_positive = (anchor - positive).pow(2).sum(1)
distance_anchor_negative = (anchor - negative).pow(2).sum(1)
loss_triplet = torch.relu(distance_anchor_positive -
distance_anchor_negative + self.margin)
return loss_triplet.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
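# --- Editorial usage sketch: not part of the original repo code; names are illustrative.
# Despite its name, the criterion above is a triplet-margin loss on squared distances:
# an element of the loss is zero only when its anchor is at least `margin` (in squared
# distance) closer to the positive than to the negative.
def _triplet_margin_example():
    torch.manual_seed(0)
    criterion = ContrastiveLoss(margin=1.0)
    anchor = torch.rand(8, 16)
    positive = torch.rand(8, 16)
    negative = torch.rand(8, 16)
    return criterion(anchor, positive, negative)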
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 1.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class ContrastiveLossNew(torch.nn.Module):
def __init__(self, margin=1.0):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
DongChengdongHangZhou/Siamese-Network-tiff
|
ContrastiveLoss
| false
| 375
|
[
"MIT"
] | 0
|
aaf923ad59301af1b3237e605964341a90dc414b
|
https://github.com/DongChengdongHangZhou/Siamese-Network-tiff/tree/aaf923ad59301af1b3237e605964341a90dc414b
|
ContrastiveEmbeddingLoss
|
import torch
import torch.nn as nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ContrastiveEmbeddingLoss(nn.Module):
"""The Contrastive embedding loss.
It has been proposed in `Dimensionality Reduction
by Learning an Invariant Mapping`_.
.. _Dimensionality Reduction by Learning an Invariant Mapping:
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, embeddings_left: 'torch.Tensor', embeddings_right:
'torch.Tensor', distance_true) ->torch.Tensor:
"""Forward propagation method for the contrastive loss.
Args:
embeddings_left: left objects embeddings
embeddings_right: right objects embeddings
distance_true: true distances
Returns:
torch.Tensor: loss
"""
diff = embeddings_left - embeddings_right
distance_pred = torch.sqrt(torch.sum(torch.pow(diff, 2), 1))
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(distance_pred, 2
) + distance_true * torch.pow(margin_distance, 2)
if self.reduction == 'mean':
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == 'sum':
loss = torch.sum(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
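# --- Editorial usage sketch: not part of the original repo code; names are illustrative.
# The criterion first reduces the two embedding batches to one Euclidean distance per
# pair, then applies the same margin formulation as ContrastiveDistanceLoss above.
def _contrastive_embedding_example():
    torch.manual_seed(0)
    criterion = ContrastiveEmbeddingLoss(margin=1.0, reduction='mean')
    embeddings_left = torch.rand(8, 32)
    embeddings_right = torch.rand(8, 32)
    distance_true = torch.randint(0, 2, (8,)).float()  # margin term applies where label == 1
    return criterion(embeddings_left, embeddings_right, distance_true)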
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = tmp1 - tmp3
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8 * tmp8
tmp10 = tmp0 * tmp9
tmp11 = tmp5 + tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sqrt_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_div_mul_pow_rsub_sum_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveEmbeddingLossNew(nn.Module):
"""The Contrastive embedding loss.
It has been proposed in `Dimensionality Reduction
by Learning an Invariant Mapping`_.
.. _Dimensionality Reduction by Learning an Invariant Mapping:
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Dokholyan/catalyst
|
ContrastiveEmbeddingLoss
| false
| 376
|
[
"Apache-2.0"
] | 0
|
de8e681676d76741fdb722d4cd77274ba616915d
|
https://github.com/Dokholyan/catalyst/tree/de8e681676d76741fdb722d4cd77274ba616915d
|
SoftEntropy
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *
class SoftEntropy(nn.Module):
def __init__(self):
super(SoftEntropy, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
loss = (-F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
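# --- Editorial usage sketch: not part of the original repo code; names are illustrative.
# softmax(targets) is treated as a detached soft-label distribution and scored against
# log_softmax(inputs); when the two logit sets are identical the loss equals the average
# entropy of the target distribution, not zero.
def _soft_entropy_example():
    torch.manual_seed(0)
    criterion = SoftEntropy()
    student_logits = torch.randn(8, 10)
    teacher_logits = torch.randn(8, 10)
    return criterion(student_logits, teacher_logits)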
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + x3, xmask)
tmp11 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = -tmp8
tmp12 = tl_math.exp(tmp11)
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp12 + tmp14
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp15 + tmp17
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp10 - tmp22
tmp24 = tmp9 * tmp23
tl.store(out_ptr0 + x3, tmp24, xmask)
@triton.jit
def triton_per_fused_mean_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(256)](buf0,
buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_mean_sum_3[grid(1)](buf2, buf3, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del buf2
return buf3,
class SoftEntropyNew(nn.Module):
def __init__(self):
super(SoftEntropyNew, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Dingyuan-Zheng/ctf-UDA
|
SoftEntropy
| false
| 377
|
[
"MIT"
] | 0
|
3e3c67f68d7eb0b52a16a259e5a77e153062c4fd
|
https://github.com/Dingyuan-Zheng/ctf-UDA/tree/3e3c67f68d7eb0b52a16a259e5a77e153062c4fd
|
ContrastivePairwiseEmbeddingLoss
|
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ContrastivePairwiseEmbeddingLoss(nn.Module):
"""ContrastivePairwiseEmbeddingLoss – proof of concept criterion.
Still work in progress.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, embeddings_pred, embeddings_true) ->torch.Tensor:
"""Forward propagation method for the contrastive loss.
Work in progress.
Args:
embeddings_pred: predicted embeddings
embeddings_true: true embeddings
Returns:
torch.Tensor: loss
"""
device = embeddings_pred.device
pairwise_similarity = torch.einsum('se,ae->sa', embeddings_pred,
embeddings_true)
bs = embeddings_pred.shape[0]
batch_idx = torch.arange(bs, device=device)
loss = F.cross_entropy(pairwise_similarity, batch_idx, reduction=
self.reduction)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
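# --- Editorial usage sketch: not part of the original repo code; names are illustrative.
# The einsum builds a [bs, bs] similarity matrix that is treated as logits, and each
# row's own index is its target class, so matched rows of the two batches are pulled
# together while all other rows act as negatives.
def _pairwise_embedding_example():
    torch.manual_seed(0)
    criterion = ContrastivePairwiseEmbeddingLoss(reduction='mean')
    embeddings_pred = torch.randn(8, 32)
    embeddings_true = torch.randn(8, 32)
    return criterion(embeddings_pred, embeddings_true)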
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_arange_nll_loss_forward_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp6 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp0 = r0
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 != tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp5 - tmp17
tmp19 = -tmp18
tmp20 = 0.0
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp2.to(tl.int64)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp24 / tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (1, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (1, 4, 4), (0, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_arange_nll_loss_forward_1[grid(1)](buf4, buf1, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf4,
class ContrastivePairwiseEmbeddingLossNew(nn.Module):
"""ContrastivePairwiseEmbeddingLoss – proof of concept criterion.
Still work in progress.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Dokholyan/catalyst
|
ContrastivePairwiseEmbeddingLoss
| false
| 378
|
[
"Apache-2.0"
] | 0
|
de8e681676d76741fdb722d4cd77274ba616915d
|
https://github.com/Dokholyan/catalyst/tree/de8e681676d76741fdb722d4cd77274ba616915d
|
PairwiseDistanceMatrix
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class PairwiseDistanceMatrix(nn.Module):
def __init__(self):
super(PairwiseDistanceMatrix, self).__init__()
def forward(self, X, Y):
X2 = (X ** 2).sum(1).view(-1, 1)
Y2 = (Y ** 2).sum(1).view(1, -1)
D = X2 + Y2 - 2.0 * torch.mm(X, Y.t())
return D
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
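# --- Editorial check sketch: not part of the original repo code; names are illustrative.
# The module relies on the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * <x, y>;
# the helper below verifies it against torch.cdist(...) ** 2 on random inputs.
def _pairwise_distance_check():
    torch.manual_seed(0)
    X = torch.rand(5, 3)
    Y = torch.rand(7, 3)
    D = PairwiseDistanceMatrix()(X, Y)
    return torch.allclose(D, torch.cdist(X, Y) ** 2, atol=1e-5)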
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tl.store(in_out_ptr0 + x2, tmp26, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_sub_0[grid(16)](buf1, arg0_1, arg1_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PairwiseDistanceMatrixNew(nn.Module):
def __init__(self):
super(PairwiseDistanceMatrixNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
DreamBlack/APCNet
|
PairwiseDistanceMatrix
| false
| 379
|
[
"MIT"
] | 0
|
d76bc9e46c3b631035c5c67e2367b6fb80621333
|
https://github.com/DreamBlack/APCNet/tree/d76bc9e46c3b631035c5c67e2367b6fb80621333
|
GenerateMask
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class VectorAttention(nn.Module):
"""vector attention"""
def __init__(self, input_dim, hidden_dim):
super(VectorAttention, self).__init__()
self.theta = nn.Linear(input_dim, hidden_dim)
self.phi = nn.Linear(input_dim, hidden_dim)
self.psi = nn.Linear(input_dim, hidden_dim)
self.recover = nn.Linear(hidden_dim, input_dim)
def forward(self, x):
x_t = self.theta(x)
x_ph = self.phi(x)
x_psi = self.psi(x)
attention_map = torch.matmul(x_ph, torch.transpose(x_t, 0, 1))
attention_map = F.softmax(attention_map, dim=1)
x_add = torch.matmul(attention_map, x_psi)
x_add = self.recover(x_add)
return x + x_add
class GenerateMask(nn.Module):
"""
sparsify by 1x1 conv and apply gumbel softmax
"""
def __init__(self, ch, resolution, or_cadidate=1000, no_attention=False):
super(GenerateMask, self).__init__()
self.conv = nn.Conv2d(ch, or_cadidate, kernel_size=1, padding=0,
bias=False)
self.relu = nn.ReLU()
self.no_attention = no_attention
if not self.no_attention:
self.prob_attention = VectorAttention(input_dim=or_cadidate,
hidden_dim=or_cadidate // 3)
def forward(self, x):
"""
input x: [n, L, c, kernel, kernel]
output mask: [n * L, 1, h, w]
"""
n, L, c, h, w = x.shape
y = self.conv(x.reshape(-1, c, h, w))
prob_vector = F.softmax(y.sum((2, 3)), dim=1)
prob_vector_previous = prob_vector
if not self.no_attention:
prob_vector = self.prob_attention(prob_vector)
            prob_vector = F.softmax(prob_vector, dim=1)
weighted_mask = (prob_vector[:, :, None, None] * y).sum((1,),
keepdim=True)
weighted_mask = F.softmax(weighted_mask.view(n * L, 1, h * w), dim=2
).view(n * L, 1, h, w)
return weighted_mask, prob_vector, prob_vector_previous, y
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch': 4, 'resolution': 4}]
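# --- Editorial shape sketch: not part of the original repo code; names and sizes are
# illustrative. The 5-D patch tensor [n, L, c, h, w] is flattened to [n * L, c, h, w]
# for the 1x1 conv, and the returned mask is normalised over each patch's h * w cells.
def _generate_mask_shapes():
    torch.manual_seed(0)
    module = GenerateMask(ch=4, resolution=4, or_cadidate=16)
    x = torch.rand(2, 3, 4, 4, 4)                  # n=2, L=3, c=4, h=w=4
    mask, prob, prob_prev, y = module(x)
    assert mask.shape == (6, 1, 4, 4) and y.shape == (6, 16, 4, 4)
    return mask.view(6, -1).sum(1)                 # ~1.0 for each of the 6 patches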
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_view_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_convolution_sum_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16000
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 1000
x1 = xindex // 1000
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1000 * r2 + 16000 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (r2 + 16 * x3), tmp0, xmask)
tl.store(out_ptr1 + x3, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 1000
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1000 * x0), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.where(rmask, tmp1, float('-inf'))
tmp4 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = tl.where(rmask, tmp7, 0)
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 1000 * x0), tmp11, rmask)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_per_fused__softmax_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr2,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 1000
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1000 * x0), rmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 1000 * x0), rmask, other=0.0)
tmp2 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0
)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = tl.where(rmask, tmp5, float('-inf'))
tmp8 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp7, 0))
tmp9 = tmp4 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = tl.where(rmask, tmp11, 0)
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + 1000 * x0), tmp15, rmask)
@triton.jit
def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 125
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x1 = xindex // 16
x0 = xindex % 16
_tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 125 * x1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 2000 * x1), rmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = _tmp4 + tmp3
_tmp4 = tl.where(rmask, tmp5, _tmp4)
tmp4 = tl.sum(_tmp4, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_per_fused_mul_sum_6(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 128 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (1000, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (333, 1000), (1000, 1))
assert_size_stride(primals_4, (333,), (1,))
assert_size_stride(primals_5, (333, 1000), (1000, 1))
assert_size_stride(primals_6, (333,), (1,))
assert_size_stride(primals_7, (333, 1000), (1000, 1))
assert_size_stride(primals_8, (333,), (1,))
assert_size_stride(primals_9, (1000, 333), (333, 1))
assert_size_stride(primals_10, (1000,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_view_0[grid(64, 16)](primals_1, buf0, 64, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (16, 1000, 4, 4), (16000, 1, 4000, 1000))
buf2 = empty_strided_cuda((16, 1000, 4, 4), (16000, 16, 4, 1),
torch.float32)
buf3 = empty_strided_cuda((16, 1000), (1000, 1), torch.float32)
triton_per_fused_convolution_sum_1[grid(16000)](buf1, buf2, buf3,
16000, 16, XBLOCK=32, num_warps=4, num_stages=1)
del buf1
buf6 = empty_strided_cuda((16, 1000), (1000, 1), torch.float32)
triton_per_fused__softmax_2[grid(16)](buf3, buf6, 16, 1000,
num_warps=8, num_stages=1)
buf7 = empty_strided_cuda((16, 333), (333, 1), torch.float32)
extern_kernels.addmm(primals_4, buf6, reinterpret_tensor(primals_3,
(1000, 333), (1, 1000), 0), alpha=1, beta=1, out=buf7)
del primals_4
buf8 = empty_strided_cuda((16, 333), (333, 1), torch.float32)
extern_kernels.addmm(primals_6, buf6, reinterpret_tensor(primals_5,
(1000, 333), (1, 1000), 0), alpha=1, beta=1, out=buf8)
del primals_6
buf9 = empty_strided_cuda((16, 333), (333, 1), torch.float32)
extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7,
(1000, 333), (1, 1000), 0), alpha=1, beta=1, out=buf9)
del primals_8
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(buf7, (333, 16), (1, 333
), 0), out=buf10)
buf13 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
triton_per_fused__softmax_3[grid(16)](buf10, buf13, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
buf14 = empty_strided_cuda((16, 333), (333, 1), torch.float32)
extern_kernels.mm(buf13, buf9, out=buf14)
buf15 = buf3
del buf3
extern_kernels.mm(buf14, reinterpret_tensor(primals_9, (333, 1000),
(1, 333), 0), out=buf15)
buf18 = empty_strided_cuda((16, 1000), (1000, 1), torch.float32)
triton_per_fused__softmax_add_4[grid(16)](buf6, buf15, primals_10,
buf18, 16, 1000, num_warps=8, num_stages=1)
del buf15
del primals_10
buf19 = empty_strided_cuda((16, 1, 4, 4, 8), (128, 2048, 4, 1, 16),
torch.float32)
triton_red_fused_mul_sum_5[grid(2048)](buf18, buf2, buf19, 2048,
125, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf10, (16, 1, 4, 4), (16, 16, 4, 1), 0)
del buf10
triton_per_fused_mul_sum_6[grid(256)](buf19, buf20, 256, 8, XBLOCK=
64, num_warps=4, num_stages=1)
del buf19
buf23 = empty_strided_cuda((16, 1, 16), (16, 16, 1), torch.float32)
triton_per_fused__softmax_3[grid(16)](buf20, buf23, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del buf20
return (reinterpret_tensor(buf23, (16, 1, 4, 4), (16, 16, 4, 1), 0),
buf18, buf6, buf2, primals_2, buf0, buf2, buf6, buf8, buf13, buf14,
buf18, buf23, primals_9, reinterpret_tensor(buf9, (333, 16), (1,
333), 0), buf7, primals_7, primals_5, primals_3)
class VectorAttention(nn.Module):
"""vector attention"""
def __init__(self, input_dim, hidden_dim):
super(VectorAttention, self).__init__()
self.theta = nn.Linear(input_dim, hidden_dim)
self.phi = nn.Linear(input_dim, hidden_dim)
self.psi = nn.Linear(input_dim, hidden_dim)
self.recover = nn.Linear(hidden_dim, input_dim)
def forward(self, x):
x_t = self.theta(x)
x_ph = self.phi(x)
x_psi = self.psi(x)
attention_map = torch.matmul(x_ph, torch.transpose(x_t, 0, 1))
attention_map = F.softmax(attention_map, dim=1)
x_add = torch.matmul(attention_map, x_psi)
x_add = self.recover(x_add)
return x + x_add
class GenerateMaskNew(nn.Module):
"""
sparsify by 1x1 conv and apply gumbel softmax
"""
def __init__(self, ch, resolution, or_cadidate=1000, no_attention=False):
super(GenerateMaskNew, self).__init__()
self.conv = nn.Conv2d(ch, or_cadidate, kernel_size=1, padding=0,
bias=False)
self.relu = nn.ReLU()
self.no_attention = no_attention
if not self.no_attention:
self.prob_attention = VectorAttention(input_dim=or_cadidate,
hidden_dim=or_cadidate // 3)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.prob_attention.theta.weight
primals_4 = self.prob_attention.theta.bias
primals_5 = self.prob_attention.phi.weight
primals_6 = self.prob_attention.phi.bias
primals_7 = self.prob_attention.psi.weight
primals_8 = self.prob_attention.psi.bias
primals_9 = self.prob_attention.recover.weight
primals_10 = self.prob_attention.recover.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1], output[2], output[3]
|
Crazy-Jack/BigGAN-PyTorch
|
GenerateMask
| false
| 380
|
[
"MIT"
] | 0
|
1a5644e9c87cc399580c96cfeb180052076888da
|
https://github.com/Crazy-Jack/BigGAN-PyTorch/tree/1a5644e9c87cc399580c96cfeb180052076888da
|
MLP
|
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
fc1, fc2
two fully connected layers
fc3
output layer
relu
activation function for hidden layers
sigmoid
activation function for output layer
"""
def __init__(self):
super(MLP, self).__init__()
self.fc1 = nn.Linear(392, 160)
self.fc2 = nn.Linear(160, 64)
self.fc3 = nn.Linear(64, 2)
def forward(self, x):
x = x.view(-1, 392)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = torch.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 392])]
def get_init_inputs():
return [[], {}]
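# --- Editorial usage sketch: not part of the original repo code; shapes are illustrative.
# The network expects anything that flattens to 392 features per sample (e.g. a pair of
# stacked 14x14 images) and emits a two-way sigmoid output per sample.
def _mlp_example():
    torch.manual_seed(0)
    model = MLP()
    x = torch.rand(4, 2, 14, 14)          # reshaped to (4, 392) inside forward
    out = model(x)
    assert out.shape == (4, 2)
    return out                            # every entry lies in (0, 1) due to the sigmoid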
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 392), (392, 1))
assert_size_stride(primals_2, (160, 392), (392, 1))
assert_size_stride(primals_3, (160,), (1,))
assert_size_stride(primals_4, (64, 160), (160, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (2, 64), (64, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (392,
160), (1, 392), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(640)](buf1, primals_3, 640, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (160, 64), (1,
160), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (64, 2), (1,
64), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_sigmoid_2[grid(8)](buf5, primals_7, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_7
return buf5, primals_1, buf1, buf3, buf5, primals_6, primals_4
class MLPNew(nn.Module):
"""
fc1, fc2
two fully connected layers
fc3
output layer
relu
activation function for hidden layers
sigmoid
activation function for output layer
"""
def __init__(self):
super(MLPNew, self).__init__()
self.fc1 = nn.Linear(392, 160)
self.fc2 = nn.Linear(160, 64)
self.fc3 = nn.Linear(64, 2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
EE559DeepLearningEPFL/Project1
|
MLP
| false
| 381
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
SEModule
|
from torch.nn import Module
import torch
import torch.nn as nn
from torch.nn import Conv2d
from torch.nn import ReLU
from torch.nn import Sigmoid
from torch.nn import AdaptiveAvgPool2d
class SEModule(Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1,
padding=0, bias=False)
nn.init.xavier_uniform_(self.fc1.weight.data)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1,
padding=0, bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'reduction': 4}]
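# --- Editorial usage sketch: not part of the original repo code; sizes are illustrative.
# Squeeze-and-excitation: a per-channel gate in (0, 1) is computed from globally pooled
# features and used to rescale the input, so output magnitudes never exceed the input's.
def _se_module_example():
    torch.manual_seed(0)
    se = SEModule(channels=16, reduction=4)
    x = torch.rand(2, 16, 8, 8)
    out = se(x)
    assert out.shape == x.shape
    return (out.abs() <= x.abs() + 1e-6).all()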
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.nn as nn
from torch.nn import Conv2d
from torch.nn import ReLU
from torch.nn import Sigmoid
from torch.nn import AdaptiveAvgPool2d
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf4, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_1, primals_2, primals_3, buf1, buf3, buf4
class SEModuleNew(Module):
def __init__(self, channels, reduction):
super(SEModuleNew, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1,
padding=0, bias=False)
nn.init.xavier_uniform_(self.fc1.weight.data)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1,
padding=0, bias=False)
self.sigmoid = Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Dou-Yu-xuan/pykinship
|
SEModule
| false
| 382
|
[
"MIT"
] | 0
|
f81f6667fa08a08fe726736d05476168b2a3e2f0
|
https://github.com/Dou-Yu-xuan/pykinship/tree/f81f6667fa08a08fe726736d05476168b2a3e2f0
|
TVLoss
|
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
class TVLoss(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TVLoss, self).__init__()
self.tv_loss_weight = tv_loss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w
) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
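# --- Editorial usage sketch: not part of the original repo code; names are illustrative.
# The loss is built from squared differences between neighbouring pixels along height
# and width (each normalised by its element count), so a constant image scores exactly 0.
def _tv_loss_example():
    torch.manual_seed(0)
    criterion = TVLoss(tv_loss_weight=1)
    flat = torch.ones(2, 3, 8, 8)
    noisy = torch.rand(2, 3, 8, 8)
    return criterion(flat), criterion(noisy)   # (0, some positive value)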
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TVLossNew, self).__init__()
self.tv_loss_weight = tv_loss_weight
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
EVA4-RS-Group/Phase2
|
TVLoss
| false
| 383
|
[
"Apache-2.0"
] | 0
|
7c551e3894979cc425dd51baeddbfa5a51b7878d
|
https://github.com/EVA4-RS-Group/Phase2/tree/7c551e3894979cc425dd51baeddbfa5a51b7878d
|
FullAttention
|
from torch.nn import Module
import torch
from torch.nn import Dropout
class FullAttention(Module):
def __init__(self, use_dropout=False, attention_dropout=0.1):
super().__init__()
self.use_dropout = use_dropout
self.dropout = Dropout(attention_dropout)
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
"""Multi-head scaled dot-product attention, a.k.a full attention.
Args:
queries: [N, L, H, D]
keys: [N, S, H, D]
values: [N, S, H, D]
q_mask: [N, L]
kv_mask: [N, S]
Returns:
queried_values: (N, L, H, D)
"""
QK = torch.einsum('nlhd,nshd->nlsh', queries, keys)
if kv_mask is not None:
QK.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :,
None]), float('-inf'))
softmax_temp = 1.0 / queries.size(3) ** 0.5
A = torch.softmax(softmax_temp * QK, dim=2)
if self.use_dropout:
A = self.dropout(A)
queried_values = torch.einsum('nlsh,nshd->nlhd', A, values)
return queried_values.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
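# --- Editorial equivalence sketch: not part of the original repo code; names are
# illustrative. Without masks or dropout the module is plain scaled dot-product
# attention; the helper reproduces it from the same einsum contractions.
def _full_attention_check():
    torch.manual_seed(0)
    attention = FullAttention(use_dropout=False)
    queries = torch.rand(2, 5, 3, 8)    # [N, L, H, D]
    keys = torch.rand(2, 7, 3, 8)       # [N, S, H, D]
    values = torch.rand(2, 7, 3, 8)     # [N, S, H, D]
    out = attention(queries, keys, values)
    scores = torch.einsum('nlhd,nshd->nlsh', queries, keys) / queries.size(3) ** 0.5
    expected = torch.einsum('nlsh,nshd->nlhd', scores.softmax(dim=2), values)
    return torch.allclose(out, expected, atol=1e-6)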
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn import Dropout
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
triton_poi_fused_clone_1[grid(64, 4)](arg0_1, buf1, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 1, 16), 0)
del buf1
triton_poi_fused__softmax_mul_2[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf2
triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf3
triton_poi_fused_clone_0[grid(256)](arg2_1, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg2_1
buf6 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
del buf4
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_clone_0[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
return buf7,
class FullAttentionNew(Module):
def __init__(self, use_dropout=False, attention_dropout=0.1):
super().__init__()
self.use_dropout = use_dropout
self.dropout = Dropout(attention_dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
EStorm21/kornia
|
FullAttention
| false
| 384
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
b2bba7950d748ba0b8ce0cc68035a248799a1044
|
https://github.com/EStorm21/kornia/tree/b2bba7950d748ba0b8ce0cc68035a248799a1044
|
PointLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
def array2samples_distance(array1, array2):
"""
arguments:
array1: the array, size: (num_point, num_feature)
array2: the samples, size: (num_point, num_feature)
returns:
        distances: the mean, over samples in array2, of the squared distance to
            the nearest point in array1
"""
num_point1, _num_features1 = array1.shape
num_point2, num_features2 = array2.shape
expanded_array1 = array1.repeat(num_point2, 1)
expanded_array2 = torch.reshape(torch.unsqueeze(array2, 1).repeat(1,
num_point1, 1), (-1, num_features2))
distances = (expanded_array1 - expanded_array2) * (expanded_array1 -
expanded_array2)
distances = torch.sum(distances, dim=1)
distances = torch.reshape(distances, (num_point2, num_point1))
distances = torch.min(distances, dim=1)[0]
distances = torch.mean(distances)
return distances
def chamfer_distance_numpy(array1, array2):
batch_size, _num_point, _num_features = array1.shape
dist = 0
for i in range(batch_size):
av_dist1 = array2samples_distance(array1[i], array2[i])
av_dist2 = array2samples_distance(array2[i], array1[i])
dist = dist + (0.5 * av_dist1 + 0.5 * av_dist2) / batch_size
return dist * 100
class PointLoss(nn.Module):
def __init__(self):
super(PointLoss, self).__init__()
def forward(self, array1, array2):
return chamfer_distance_numpy(array1, array2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
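# Single fused Chamfer-distance kernel, fully unrolled for the fixed (4, 4, 4) test shape:
# for every point it computes the squared Euclidean distance to each of the 4 points in the
# other cloud, keeps the minimum, averages the minima over the 4 points (tl.sum / 4.0),
# weights both directions by 0.5, averages over the batch (* 0.25) and scales by 100,
# mirroring chamfer_distance_numpy above.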
@triton.jit
def triton_per_fused_add_div_mean_min_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * (4 * r0 % 4), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * (4 * r0 % 4)), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * (4 * r0 % 4)), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr0 + 5)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp28 = tl.load(in_ptr0 + 6)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp33 = tl.load(in_ptr0 + 7)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp39 = tl.load(in_ptr0 + 8)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp43 = tl.load(in_ptr0 + 9)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp48 = tl.load(in_ptr0 + 10)
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp53 = tl.load(in_ptr0 + 11)
tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK])
tmp59 = tl.load(in_ptr0 + 12)
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp63 = tl.load(in_ptr0 + 13)
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp68 = tl.load(in_ptr0 + 14)
tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
tmp73 = tl.load(in_ptr0 + 15)
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp82 = tl.load(in_ptr1 + 4 * (4 * r0 % 4), None, eviction_policy=
'evict_last')
tmp83 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr1 + (1 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp87 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr1 + (2 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp92 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr1 + (3 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp97 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp101 = tl.load(in_ptr1 + 4)
tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK])
tmp105 = tl.load(in_ptr1 + 5)
tmp106 = tl.broadcast_to(tmp105, [XBLOCK, RBLOCK])
tmp110 = tl.load(in_ptr1 + 6)
tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK])
tmp115 = tl.load(in_ptr1 + 7)
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp121 = tl.load(in_ptr1 + 8)
tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK])
tmp125 = tl.load(in_ptr1 + 9)
tmp126 = tl.broadcast_to(tmp125, [XBLOCK, RBLOCK])
tmp130 = tl.load(in_ptr1 + 10)
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp135 = tl.load(in_ptr1 + 11)
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp141 = tl.load(in_ptr1 + 12)
tmp142 = tl.broadcast_to(tmp141, [XBLOCK, RBLOCK])
tmp145 = tl.load(in_ptr1 + 13)
tmp146 = tl.broadcast_to(tmp145, [XBLOCK, RBLOCK])
tmp150 = tl.load(in_ptr1 + 14)
tmp151 = tl.broadcast_to(tmp150, [XBLOCK, RBLOCK])
tmp155 = tl.load(in_ptr1 + 15)
tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK])
tmp164 = tl.load(in_ptr0 + (16 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp165 = tl.load(in_ptr1 + (16 + 4 * r0), None, eviction_policy=
'evict_last')
tmp168 = tl.load(in_ptr0 + (17 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp169 = tl.load(in_ptr1 + (17 + 4 * r0), None, eviction_policy=
'evict_last')
tmp173 = tl.load(in_ptr0 + (18 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp174 = tl.load(in_ptr1 + (18 + 4 * r0), None, eviction_policy=
'evict_last')
tmp178 = tl.load(in_ptr0 + (19 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp179 = tl.load(in_ptr1 + (19 + 4 * r0), None, eviction_policy=
'evict_last')
tmp183 = tl.load(in_ptr0 + 20)
tmp184 = tl.broadcast_to(tmp183, [XBLOCK, RBLOCK])
tmp187 = tl.load(in_ptr0 + 21)
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp192 = tl.load(in_ptr0 + 22)
tmp193 = tl.broadcast_to(tmp192, [XBLOCK, RBLOCK])
tmp197 = tl.load(in_ptr0 + 23)
tmp198 = tl.broadcast_to(tmp197, [XBLOCK, RBLOCK])
tmp203 = tl.load(in_ptr0 + 24)
tmp204 = tl.broadcast_to(tmp203, [XBLOCK, RBLOCK])
tmp207 = tl.load(in_ptr0 + 25)
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp212 = tl.load(in_ptr0 + 26)
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp217 = tl.load(in_ptr0 + 27)
tmp218 = tl.broadcast_to(tmp217, [XBLOCK, RBLOCK])
tmp223 = tl.load(in_ptr0 + 28)
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp227 = tl.load(in_ptr0 + 29)
tmp228 = tl.broadcast_to(tmp227, [XBLOCK, RBLOCK])
tmp232 = tl.load(in_ptr0 + 30)
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp237 = tl.load(in_ptr0 + 31)
tmp238 = tl.broadcast_to(tmp237, [XBLOCK, RBLOCK])
tmp246 = tl.load(in_ptr1 + (16 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp247 = tl.load(in_ptr0 + (16 + 4 * r0), None, eviction_policy=
'evict_last')
tmp250 = tl.load(in_ptr1 + (17 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp251 = tl.load(in_ptr0 + (17 + 4 * r0), None, eviction_policy=
'evict_last')
tmp255 = tl.load(in_ptr1 + (18 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp256 = tl.load(in_ptr0 + (18 + 4 * r0), None, eviction_policy=
'evict_last')
tmp260 = tl.load(in_ptr1 + (19 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp261 = tl.load(in_ptr0 + (19 + 4 * r0), None, eviction_policy=
'evict_last')
tmp265 = tl.load(in_ptr1 + 20)
tmp266 = tl.broadcast_to(tmp265, [XBLOCK, RBLOCK])
tmp269 = tl.load(in_ptr1 + 21)
tmp270 = tl.broadcast_to(tmp269, [XBLOCK, RBLOCK])
tmp274 = tl.load(in_ptr1 + 22)
tmp275 = tl.broadcast_to(tmp274, [XBLOCK, RBLOCK])
tmp279 = tl.load(in_ptr1 + 23)
tmp280 = tl.broadcast_to(tmp279, [XBLOCK, RBLOCK])
tmp285 = tl.load(in_ptr1 + 24)
tmp286 = tl.broadcast_to(tmp285, [XBLOCK, RBLOCK])
tmp289 = tl.load(in_ptr1 + 25)
tmp290 = tl.broadcast_to(tmp289, [XBLOCK, RBLOCK])
tmp294 = tl.load(in_ptr1 + 26)
tmp295 = tl.broadcast_to(tmp294, [XBLOCK, RBLOCK])
tmp299 = tl.load(in_ptr1 + 27)
tmp300 = tl.broadcast_to(tmp299, [XBLOCK, RBLOCK])
tmp305 = tl.load(in_ptr1 + 28)
tmp306 = tl.broadcast_to(tmp305, [XBLOCK, RBLOCK])
tmp309 = tl.load(in_ptr1 + 29)
tmp310 = tl.broadcast_to(tmp309, [XBLOCK, RBLOCK])
tmp314 = tl.load(in_ptr1 + 30)
tmp315 = tl.broadcast_to(tmp314, [XBLOCK, RBLOCK])
tmp319 = tl.load(in_ptr1 + 31)
tmp320 = tl.broadcast_to(tmp319, [XBLOCK, RBLOCK])
tmp328 = tl.load(in_ptr0 + (48 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp329 = tl.load(in_ptr1 + (48 + 4 * r0), None, eviction_policy=
'evict_last')
tmp332 = tl.load(in_ptr0 + (49 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp333 = tl.load(in_ptr1 + (49 + 4 * r0), None, eviction_policy=
'evict_last')
tmp337 = tl.load(in_ptr0 + (50 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp338 = tl.load(in_ptr1 + (50 + 4 * r0), None, eviction_policy=
'evict_last')
tmp342 = tl.load(in_ptr0 + (51 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp343 = tl.load(in_ptr1 + (51 + 4 * r0), None, eviction_policy=
'evict_last')
tmp347 = tl.load(in_ptr0 + 52)
tmp348 = tl.broadcast_to(tmp347, [XBLOCK, RBLOCK])
tmp351 = tl.load(in_ptr0 + 53)
tmp352 = tl.broadcast_to(tmp351, [XBLOCK, RBLOCK])
tmp356 = tl.load(in_ptr0 + 54)
tmp357 = tl.broadcast_to(tmp356, [XBLOCK, RBLOCK])
tmp361 = tl.load(in_ptr0 + 55)
tmp362 = tl.broadcast_to(tmp361, [XBLOCK, RBLOCK])
tmp367 = tl.load(in_ptr0 + 56)
tmp368 = tl.broadcast_to(tmp367, [XBLOCK, RBLOCK])
tmp371 = tl.load(in_ptr0 + 57)
tmp372 = tl.broadcast_to(tmp371, [XBLOCK, RBLOCK])
tmp376 = tl.load(in_ptr0 + 58)
tmp377 = tl.broadcast_to(tmp376, [XBLOCK, RBLOCK])
tmp381 = tl.load(in_ptr0 + 59)
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp387 = tl.load(in_ptr0 + 60)
tmp388 = tl.broadcast_to(tmp387, [XBLOCK, RBLOCK])
tmp391 = tl.load(in_ptr0 + 61)
tmp392 = tl.broadcast_to(tmp391, [XBLOCK, RBLOCK])
tmp396 = tl.load(in_ptr0 + 62)
tmp397 = tl.broadcast_to(tmp396, [XBLOCK, RBLOCK])
tmp401 = tl.load(in_ptr0 + 63)
tmp402 = tl.broadcast_to(tmp401, [XBLOCK, RBLOCK])
tmp410 = tl.load(in_ptr0 + (32 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp411 = tl.load(in_ptr1 + (32 + 4 * r0), None, eviction_policy=
'evict_last')
tmp414 = tl.load(in_ptr0 + (33 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp415 = tl.load(in_ptr1 + (33 + 4 * r0), None, eviction_policy=
'evict_last')
tmp419 = tl.load(in_ptr0 + (34 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp420 = tl.load(in_ptr1 + (34 + 4 * r0), None, eviction_policy=
'evict_last')
tmp424 = tl.load(in_ptr0 + (35 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp425 = tl.load(in_ptr1 + (35 + 4 * r0), None, eviction_policy=
'evict_last')
tmp429 = tl.load(in_ptr0 + 36)
tmp430 = tl.broadcast_to(tmp429, [XBLOCK, RBLOCK])
tmp433 = tl.load(in_ptr0 + 37)
tmp434 = tl.broadcast_to(tmp433, [XBLOCK, RBLOCK])
tmp438 = tl.load(in_ptr0 + 38)
tmp439 = tl.broadcast_to(tmp438, [XBLOCK, RBLOCK])
tmp443 = tl.load(in_ptr0 + 39)
tmp444 = tl.broadcast_to(tmp443, [XBLOCK, RBLOCK])
tmp449 = tl.load(in_ptr0 + 40)
tmp450 = tl.broadcast_to(tmp449, [XBLOCK, RBLOCK])
tmp453 = tl.load(in_ptr0 + 41)
tmp454 = tl.broadcast_to(tmp453, [XBLOCK, RBLOCK])
tmp458 = tl.load(in_ptr0 + 42)
tmp459 = tl.broadcast_to(tmp458, [XBLOCK, RBLOCK])
tmp463 = tl.load(in_ptr0 + 43)
tmp464 = tl.broadcast_to(tmp463, [XBLOCK, RBLOCK])
tmp469 = tl.load(in_ptr0 + 44)
tmp470 = tl.broadcast_to(tmp469, [XBLOCK, RBLOCK])
tmp473 = tl.load(in_ptr0 + 45)
tmp474 = tl.broadcast_to(tmp473, [XBLOCK, RBLOCK])
tmp478 = tl.load(in_ptr0 + 46)
tmp479 = tl.broadcast_to(tmp478, [XBLOCK, RBLOCK])
tmp483 = tl.load(in_ptr0 + 47)
tmp484 = tl.broadcast_to(tmp483, [XBLOCK, RBLOCK])
tmp492 = tl.load(in_ptr1 + (48 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp493 = tl.load(in_ptr0 + (48 + 4 * r0), None, eviction_policy=
'evict_last')
tmp496 = tl.load(in_ptr1 + (49 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp497 = tl.load(in_ptr0 + (49 + 4 * r0), None, eviction_policy=
'evict_last')
tmp501 = tl.load(in_ptr1 + (50 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp502 = tl.load(in_ptr0 + (50 + 4 * r0), None, eviction_policy=
'evict_last')
tmp506 = tl.load(in_ptr1 + (51 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp507 = tl.load(in_ptr0 + (51 + 4 * r0), None, eviction_policy=
'evict_last')
tmp511 = tl.load(in_ptr1 + 52)
tmp512 = tl.broadcast_to(tmp511, [XBLOCK, RBLOCK])
tmp515 = tl.load(in_ptr1 + 53)
tmp516 = tl.broadcast_to(tmp515, [XBLOCK, RBLOCK])
tmp520 = tl.load(in_ptr1 + 54)
tmp521 = tl.broadcast_to(tmp520, [XBLOCK, RBLOCK])
tmp525 = tl.load(in_ptr1 + 55)
tmp526 = tl.broadcast_to(tmp525, [XBLOCK, RBLOCK])
tmp531 = tl.load(in_ptr1 + 56)
tmp532 = tl.broadcast_to(tmp531, [XBLOCK, RBLOCK])
tmp535 = tl.load(in_ptr1 + 57)
tmp536 = tl.broadcast_to(tmp535, [XBLOCK, RBLOCK])
tmp540 = tl.load(in_ptr1 + 58)
tmp541 = tl.broadcast_to(tmp540, [XBLOCK, RBLOCK])
tmp545 = tl.load(in_ptr1 + 59)
tmp546 = tl.broadcast_to(tmp545, [XBLOCK, RBLOCK])
tmp551 = tl.load(in_ptr1 + 60)
tmp552 = tl.broadcast_to(tmp551, [XBLOCK, RBLOCK])
tmp555 = tl.load(in_ptr1 + 61)
tmp556 = tl.broadcast_to(tmp555, [XBLOCK, RBLOCK])
tmp560 = tl.load(in_ptr1 + 62)
tmp561 = tl.broadcast_to(tmp560, [XBLOCK, RBLOCK])
tmp565 = tl.load(in_ptr1 + 63)
tmp566 = tl.broadcast_to(tmp565, [XBLOCK, RBLOCK])
tmp574 = tl.load(in_ptr1 + (32 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp575 = tl.load(in_ptr0 + (32 + 4 * r0), None, eviction_policy=
'evict_last')
tmp578 = tl.load(in_ptr1 + (33 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp579 = tl.load(in_ptr0 + (33 + 4 * r0), None, eviction_policy=
'evict_last')
tmp583 = tl.load(in_ptr1 + (34 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp584 = tl.load(in_ptr0 + (34 + 4 * r0), None, eviction_policy=
'evict_last')
tmp588 = tl.load(in_ptr1 + (35 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp589 = tl.load(in_ptr0 + (35 + 4 * r0), None, eviction_policy=
'evict_last')
tmp593 = tl.load(in_ptr1 + 36)
tmp594 = tl.broadcast_to(tmp593, [XBLOCK, RBLOCK])
tmp597 = tl.load(in_ptr1 + 37)
tmp598 = tl.broadcast_to(tmp597, [XBLOCK, RBLOCK])
tmp602 = tl.load(in_ptr1 + 38)
tmp603 = tl.broadcast_to(tmp602, [XBLOCK, RBLOCK])
tmp607 = tl.load(in_ptr1 + 39)
tmp608 = tl.broadcast_to(tmp607, [XBLOCK, RBLOCK])
tmp613 = tl.load(in_ptr1 + 40)
tmp614 = tl.broadcast_to(tmp613, [XBLOCK, RBLOCK])
tmp617 = tl.load(in_ptr1 + 41)
tmp618 = tl.broadcast_to(tmp617, [XBLOCK, RBLOCK])
tmp622 = tl.load(in_ptr1 + 42)
tmp623 = tl.broadcast_to(tmp622, [XBLOCK, RBLOCK])
tmp627 = tl.load(in_ptr1 + 43)
tmp628 = tl.broadcast_to(tmp627, [XBLOCK, RBLOCK])
tmp633 = tl.load(in_ptr1 + 44)
tmp634 = tl.broadcast_to(tmp633, [XBLOCK, RBLOCK])
tmp637 = tl.load(in_ptr1 + 45)
tmp638 = tl.broadcast_to(tmp637, [XBLOCK, RBLOCK])
tmp642 = tl.load(in_ptr1 + 46)
tmp643 = tl.broadcast_to(tmp642, [XBLOCK, RBLOCK])
tmp647 = tl.load(in_ptr1 + 47)
tmp648 = tl.broadcast_to(tmp647, [XBLOCK, RBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tmp20 - tmp1
tmp22 = tmp21 * tmp21
tmp25 = tmp24 - tmp5
tmp26 = tmp25 * tmp25
tmp27 = tmp22 + tmp26
tmp30 = tmp29 - tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp27 + tmp31
tmp35 = tmp34 - tmp15
tmp36 = tmp35 * tmp35
tmp37 = tmp32 + tmp36
tmp38 = triton_helpers.minimum(tmp18, tmp37)
tmp41 = tmp40 - tmp1
tmp42 = tmp41 * tmp41
tmp45 = tmp44 - tmp5
tmp46 = tmp45 * tmp45
tmp47 = tmp42 + tmp46
tmp50 = tmp49 - tmp10
tmp51 = tmp50 * tmp50
tmp52 = tmp47 + tmp51
tmp55 = tmp54 - tmp15
tmp56 = tmp55 * tmp55
tmp57 = tmp52 + tmp56
tmp58 = triton_helpers.minimum(tmp38, tmp57)
tmp61 = tmp60 - tmp1
tmp62 = tmp61 * tmp61
tmp65 = tmp64 - tmp5
tmp66 = tmp65 * tmp65
tmp67 = tmp62 + tmp66
tmp70 = tmp69 - tmp10
tmp71 = tmp70 * tmp70
tmp72 = tmp67 + tmp71
tmp75 = tmp74 - tmp15
tmp76 = tmp75 * tmp75
tmp77 = tmp72 + tmp76
tmp78 = triton_helpers.minimum(tmp58, tmp77)
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp84 = tmp82 - tmp83
tmp85 = tmp84 * tmp84
tmp88 = tmp86 - tmp87
tmp89 = tmp88 * tmp88
tmp90 = tmp85 + tmp89
tmp93 = tmp91 - tmp92
tmp94 = tmp93 * tmp93
tmp95 = tmp90 + tmp94
tmp98 = tmp96 - tmp97
tmp99 = tmp98 * tmp98
tmp100 = tmp95 + tmp99
tmp103 = tmp102 - tmp83
tmp104 = tmp103 * tmp103
tmp107 = tmp106 - tmp87
tmp108 = tmp107 * tmp107
tmp109 = tmp104 + tmp108
tmp112 = tmp111 - tmp92
tmp113 = tmp112 * tmp112
tmp114 = tmp109 + tmp113
tmp117 = tmp116 - tmp97
tmp118 = tmp117 * tmp117
tmp119 = tmp114 + tmp118
tmp120 = triton_helpers.minimum(tmp100, tmp119)
tmp123 = tmp122 - tmp83
tmp124 = tmp123 * tmp123
tmp127 = tmp126 - tmp87
tmp128 = tmp127 * tmp127
tmp129 = tmp124 + tmp128
tmp132 = tmp131 - tmp92
tmp133 = tmp132 * tmp132
tmp134 = tmp129 + tmp133
tmp137 = tmp136 - tmp97
tmp138 = tmp137 * tmp137
tmp139 = tmp134 + tmp138
tmp140 = triton_helpers.minimum(tmp120, tmp139)
tmp143 = tmp142 - tmp83
tmp144 = tmp143 * tmp143
tmp147 = tmp146 - tmp87
tmp148 = tmp147 * tmp147
tmp149 = tmp144 + tmp148
tmp152 = tmp151 - tmp92
tmp153 = tmp152 * tmp152
tmp154 = tmp149 + tmp153
tmp157 = tmp156 - tmp97
tmp158 = tmp157 * tmp157
tmp159 = tmp154 + tmp158
tmp160 = triton_helpers.minimum(tmp140, tmp159)
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = tl.sum(tmp161, 1)[:, None]
tmp166 = tmp164 - tmp165
tmp167 = tmp166 * tmp166
tmp170 = tmp168 - tmp169
tmp171 = tmp170 * tmp170
tmp172 = tmp167 + tmp171
tmp175 = tmp173 - tmp174
tmp176 = tmp175 * tmp175
tmp177 = tmp172 + tmp176
tmp180 = tmp178 - tmp179
tmp181 = tmp180 * tmp180
tmp182 = tmp177 + tmp181
tmp185 = tmp184 - tmp165
tmp186 = tmp185 * tmp185
tmp189 = tmp188 - tmp169
tmp190 = tmp189 * tmp189
tmp191 = tmp186 + tmp190
tmp194 = tmp193 - tmp174
tmp195 = tmp194 * tmp194
tmp196 = tmp191 + tmp195
tmp199 = tmp198 - tmp179
tmp200 = tmp199 * tmp199
tmp201 = tmp196 + tmp200
tmp202 = triton_helpers.minimum(tmp182, tmp201)
tmp205 = tmp204 - tmp165
tmp206 = tmp205 * tmp205
tmp209 = tmp208 - tmp169
tmp210 = tmp209 * tmp209
tmp211 = tmp206 + tmp210
tmp214 = tmp213 - tmp174
tmp215 = tmp214 * tmp214
tmp216 = tmp211 + tmp215
tmp219 = tmp218 - tmp179
tmp220 = tmp219 * tmp219
tmp221 = tmp216 + tmp220
tmp222 = triton_helpers.minimum(tmp202, tmp221)
tmp225 = tmp224 - tmp165
tmp226 = tmp225 * tmp225
tmp229 = tmp228 - tmp169
tmp230 = tmp229 * tmp229
tmp231 = tmp226 + tmp230
tmp234 = tmp233 - tmp174
tmp235 = tmp234 * tmp234
tmp236 = tmp231 + tmp235
tmp239 = tmp238 - tmp179
tmp240 = tmp239 * tmp239
tmp241 = tmp236 + tmp240
tmp242 = triton_helpers.minimum(tmp222, tmp241)
tmp243 = tl.broadcast_to(tmp242, [XBLOCK, RBLOCK])
tmp245 = tl.sum(tmp243, 1)[:, None]
tmp248 = tmp246 - tmp247
tmp249 = tmp248 * tmp248
tmp252 = tmp250 - tmp251
tmp253 = tmp252 * tmp252
tmp254 = tmp249 + tmp253
tmp257 = tmp255 - tmp256
tmp258 = tmp257 * tmp257
tmp259 = tmp254 + tmp258
tmp262 = tmp260 - tmp261
tmp263 = tmp262 * tmp262
tmp264 = tmp259 + tmp263
tmp267 = tmp266 - tmp247
tmp268 = tmp267 * tmp267
tmp271 = tmp270 - tmp251
tmp272 = tmp271 * tmp271
tmp273 = tmp268 + tmp272
tmp276 = tmp275 - tmp256
tmp277 = tmp276 * tmp276
tmp278 = tmp273 + tmp277
tmp281 = tmp280 - tmp261
tmp282 = tmp281 * tmp281
tmp283 = tmp278 + tmp282
tmp284 = triton_helpers.minimum(tmp264, tmp283)
tmp287 = tmp286 - tmp247
tmp288 = tmp287 * tmp287
tmp291 = tmp290 - tmp251
tmp292 = tmp291 * tmp291
tmp293 = tmp288 + tmp292
tmp296 = tmp295 - tmp256
tmp297 = tmp296 * tmp296
tmp298 = tmp293 + tmp297
tmp301 = tmp300 - tmp261
tmp302 = tmp301 * tmp301
tmp303 = tmp298 + tmp302
tmp304 = triton_helpers.minimum(tmp284, tmp303)
tmp307 = tmp306 - tmp247
tmp308 = tmp307 * tmp307
tmp311 = tmp310 - tmp251
tmp312 = tmp311 * tmp311
tmp313 = tmp308 + tmp312
tmp316 = tmp315 - tmp256
tmp317 = tmp316 * tmp316
tmp318 = tmp313 + tmp317
tmp321 = tmp320 - tmp261
tmp322 = tmp321 * tmp321
tmp323 = tmp318 + tmp322
tmp324 = triton_helpers.minimum(tmp304, tmp323)
tmp325 = tl.broadcast_to(tmp324, [XBLOCK, RBLOCK])
tmp327 = tl.sum(tmp325, 1)[:, None]
tmp330 = tmp328 - tmp329
tmp331 = tmp330 * tmp330
tmp334 = tmp332 - tmp333
tmp335 = tmp334 * tmp334
tmp336 = tmp331 + tmp335
tmp339 = tmp337 - tmp338
tmp340 = tmp339 * tmp339
tmp341 = tmp336 + tmp340
tmp344 = tmp342 - tmp343
tmp345 = tmp344 * tmp344
tmp346 = tmp341 + tmp345
tmp349 = tmp348 - tmp329
tmp350 = tmp349 * tmp349
tmp353 = tmp352 - tmp333
tmp354 = tmp353 * tmp353
tmp355 = tmp350 + tmp354
tmp358 = tmp357 - tmp338
tmp359 = tmp358 * tmp358
tmp360 = tmp355 + tmp359
tmp363 = tmp362 - tmp343
tmp364 = tmp363 * tmp363
tmp365 = tmp360 + tmp364
tmp366 = triton_helpers.minimum(tmp346, tmp365)
tmp369 = tmp368 - tmp329
tmp370 = tmp369 * tmp369
tmp373 = tmp372 - tmp333
tmp374 = tmp373 * tmp373
tmp375 = tmp370 + tmp374
tmp378 = tmp377 - tmp338
tmp379 = tmp378 * tmp378
tmp380 = tmp375 + tmp379
tmp383 = tmp382 - tmp343
tmp384 = tmp383 * tmp383
tmp385 = tmp380 + tmp384
tmp386 = triton_helpers.minimum(tmp366, tmp385)
tmp389 = tmp388 - tmp329
tmp390 = tmp389 * tmp389
tmp393 = tmp392 - tmp333
tmp394 = tmp393 * tmp393
tmp395 = tmp390 + tmp394
tmp398 = tmp397 - tmp338
tmp399 = tmp398 * tmp398
tmp400 = tmp395 + tmp399
tmp403 = tmp402 - tmp343
tmp404 = tmp403 * tmp403
tmp405 = tmp400 + tmp404
tmp406 = triton_helpers.minimum(tmp386, tmp405)
tmp407 = tl.broadcast_to(tmp406, [XBLOCK, RBLOCK])
tmp409 = tl.sum(tmp407, 1)[:, None]
tmp412 = tmp410 - tmp411
tmp413 = tmp412 * tmp412
tmp416 = tmp414 - tmp415
tmp417 = tmp416 * tmp416
tmp418 = tmp413 + tmp417
tmp421 = tmp419 - tmp420
tmp422 = tmp421 * tmp421
tmp423 = tmp418 + tmp422
tmp426 = tmp424 - tmp425
tmp427 = tmp426 * tmp426
tmp428 = tmp423 + tmp427
tmp431 = tmp430 - tmp411
tmp432 = tmp431 * tmp431
tmp435 = tmp434 - tmp415
tmp436 = tmp435 * tmp435
tmp437 = tmp432 + tmp436
tmp440 = tmp439 - tmp420
tmp441 = tmp440 * tmp440
tmp442 = tmp437 + tmp441
tmp445 = tmp444 - tmp425
tmp446 = tmp445 * tmp445
tmp447 = tmp442 + tmp446
tmp448 = triton_helpers.minimum(tmp428, tmp447)
tmp451 = tmp450 - tmp411
tmp452 = tmp451 * tmp451
tmp455 = tmp454 - tmp415
tmp456 = tmp455 * tmp455
tmp457 = tmp452 + tmp456
tmp460 = tmp459 - tmp420
tmp461 = tmp460 * tmp460
tmp462 = tmp457 + tmp461
tmp465 = tmp464 - tmp425
tmp466 = tmp465 * tmp465
tmp467 = tmp462 + tmp466
tmp468 = triton_helpers.minimum(tmp448, tmp467)
tmp471 = tmp470 - tmp411
tmp472 = tmp471 * tmp471
tmp475 = tmp474 - tmp415
tmp476 = tmp475 * tmp475
tmp477 = tmp472 + tmp476
tmp480 = tmp479 - tmp420
tmp481 = tmp480 * tmp480
tmp482 = tmp477 + tmp481
tmp485 = tmp484 - tmp425
tmp486 = tmp485 * tmp485
tmp487 = tmp482 + tmp486
tmp488 = triton_helpers.minimum(tmp468, tmp487)
tmp489 = tl.broadcast_to(tmp488, [XBLOCK, RBLOCK])
tmp491 = tl.sum(tmp489, 1)[:, None]
tmp494 = tmp492 - tmp493
tmp495 = tmp494 * tmp494
tmp498 = tmp496 - tmp497
tmp499 = tmp498 * tmp498
tmp500 = tmp495 + tmp499
tmp503 = tmp501 - tmp502
tmp504 = tmp503 * tmp503
tmp505 = tmp500 + tmp504
tmp508 = tmp506 - tmp507
tmp509 = tmp508 * tmp508
tmp510 = tmp505 + tmp509
tmp513 = tmp512 - tmp493
tmp514 = tmp513 * tmp513
tmp517 = tmp516 - tmp497
tmp518 = tmp517 * tmp517
tmp519 = tmp514 + tmp518
tmp522 = tmp521 - tmp502
tmp523 = tmp522 * tmp522
tmp524 = tmp519 + tmp523
tmp527 = tmp526 - tmp507
tmp528 = tmp527 * tmp527
tmp529 = tmp524 + tmp528
tmp530 = triton_helpers.minimum(tmp510, tmp529)
tmp533 = tmp532 - tmp493
tmp534 = tmp533 * tmp533
tmp537 = tmp536 - tmp497
tmp538 = tmp537 * tmp537
tmp539 = tmp534 + tmp538
tmp542 = tmp541 - tmp502
tmp543 = tmp542 * tmp542
tmp544 = tmp539 + tmp543
tmp547 = tmp546 - tmp507
tmp548 = tmp547 * tmp547
tmp549 = tmp544 + tmp548
tmp550 = triton_helpers.minimum(tmp530, tmp549)
tmp553 = tmp552 - tmp493
tmp554 = tmp553 * tmp553
tmp557 = tmp556 - tmp497
tmp558 = tmp557 * tmp557
tmp559 = tmp554 + tmp558
tmp562 = tmp561 - tmp502
tmp563 = tmp562 * tmp562
tmp564 = tmp559 + tmp563
tmp567 = tmp566 - tmp507
tmp568 = tmp567 * tmp567
tmp569 = tmp564 + tmp568
tmp570 = triton_helpers.minimum(tmp550, tmp569)
tmp571 = tl.broadcast_to(tmp570, [XBLOCK, RBLOCK])
tmp573 = tl.sum(tmp571, 1)[:, None]
tmp576 = tmp574 - tmp575
tmp577 = tmp576 * tmp576
tmp580 = tmp578 - tmp579
tmp581 = tmp580 * tmp580
tmp582 = tmp577 + tmp581
tmp585 = tmp583 - tmp584
tmp586 = tmp585 * tmp585
tmp587 = tmp582 + tmp586
tmp590 = tmp588 - tmp589
tmp591 = tmp590 * tmp590
tmp592 = tmp587 + tmp591
tmp595 = tmp594 - tmp575
tmp596 = tmp595 * tmp595
tmp599 = tmp598 - tmp579
tmp600 = tmp599 * tmp599
tmp601 = tmp596 + tmp600
tmp604 = tmp603 - tmp584
tmp605 = tmp604 * tmp604
tmp606 = tmp601 + tmp605
tmp609 = tmp608 - tmp589
tmp610 = tmp609 * tmp609
tmp611 = tmp606 + tmp610
tmp612 = triton_helpers.minimum(tmp592, tmp611)
tmp615 = tmp614 - tmp575
tmp616 = tmp615 * tmp615
tmp619 = tmp618 - tmp579
tmp620 = tmp619 * tmp619
tmp621 = tmp616 + tmp620
tmp624 = tmp623 - tmp584
tmp625 = tmp624 * tmp624
tmp626 = tmp621 + tmp625
tmp629 = tmp628 - tmp589
tmp630 = tmp629 * tmp629
tmp631 = tmp626 + tmp630
tmp632 = triton_helpers.minimum(tmp612, tmp631)
tmp635 = tmp634 - tmp575
tmp636 = tmp635 * tmp635
tmp639 = tmp638 - tmp579
tmp640 = tmp639 * tmp639
tmp641 = tmp636 + tmp640
tmp644 = tmp643 - tmp584
tmp645 = tmp644 * tmp644
tmp646 = tmp641 + tmp645
tmp649 = tmp648 - tmp589
tmp650 = tmp649 * tmp649
tmp651 = tmp646 + tmp650
tmp652 = triton_helpers.minimum(tmp632, tmp651)
tmp653 = tl.broadcast_to(tmp652, [XBLOCK, RBLOCK])
tmp655 = tl.sum(tmp653, 1)[:, None]
tmp656 = 4.0
tmp657 = tmp81 / tmp656
tmp658 = 0.5
tmp659 = tmp657 * tmp658
tmp660 = tmp163 / tmp656
tmp661 = tmp660 * tmp658
tmp662 = tmp659 + tmp661
tmp663 = 0.25
tmp664 = tmp662 * tmp663
tmp665 = 0.0
tmp666 = tmp664 + tmp665
tmp667 = tmp245 / tmp656
tmp668 = tmp667 * tmp658
tmp669 = tmp327 / tmp656
tmp670 = tmp669 * tmp658
tmp671 = tmp668 + tmp670
tmp672 = tmp671 * tmp663
tmp673 = tmp666 + tmp672
tmp674 = tmp491 / tmp656
tmp675 = tmp674 * tmp658
tmp676 = tmp655 / tmp656
tmp677 = tmp676 * tmp658
tmp678 = tmp675 + tmp677
tmp679 = tmp678 * tmp663
tmp680 = tmp673 + tmp679
tmp681 = tmp409 / tmp656
tmp682 = tmp681 * tmp658
tmp683 = tmp573 / tmp656
tmp684 = tmp683 * tmp658
tmp685 = tmp682 + tmp684
tmp686 = tmp685 * tmp663
tmp687 = tmp680 + tmp686
tmp688 = 100.0
tmp689 = tmp687 * tmp688
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp689, None)
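# The whole loss collapses to one scalar, so the kernel is launched on a grid of size 1 and
# accumulates the result directly into the in/out buffer.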
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10
del buf10
buf17 = buf14
del buf14
get_raw_stream(0)
triton_per_fused_add_div_mean_min_mul_0[grid(1)](buf17, arg0_1,
arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf17,
def array2samples_distance(array1, array2):
"""
arguments:
array1: the array, size: (num_point, num_feature)
array2: the samples, size: (num_point, num_feature)
returns:
        distances: the mean, over samples in array2, of the squared distance to
            the nearest point in array1
"""
num_point1, _num_features1 = array1.shape
num_point2, num_features2 = array2.shape
expanded_array1 = array1.repeat(num_point2, 1)
expanded_array2 = torch.reshape(torch.unsqueeze(array2, 1).repeat(1,
num_point1, 1), (-1, num_features2))
distances = (expanded_array1 - expanded_array2) * (expanded_array1 -
expanded_array2)
distances = torch.sum(distances, dim=1)
distances = torch.reshape(distances, (num_point2, num_point1))
distances = torch.min(distances, dim=1)[0]
distances = torch.mean(distances)
return distances
def chamfer_distance_numpy(array1, array2):
batch_size, _num_point, _num_features = array1.shape
dist = 0
for i in range(batch_size):
av_dist1 = array2samples_distance(array1[i], array2[i])
av_dist2 = array2samples_distance(array2[i], array1[i])
dist = dist + (0.5 * av_dist1 + 0.5 * av_dist2) / batch_size
return dist * 100
class PointLossNew(nn.Module):
def __init__(self):
super(PointLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
DreamBlack/APCNet
|
PointLoss
| false
| 385
|
[
"MIT"
] | 0
|
d76bc9e46c3b631035c5c67e2367b6fb80621333
|
https://github.com/DreamBlack/APCNet/tree/d76bc9e46c3b631035c5c67e2367b6fb80621333
|
Hflip
|
import torch
import torch.nn as nn
def hflip(input: 'torch.Tensor') ->torch.Tensor:
"""Horizontally flip a tensor image or a batch of tensor images.
.. image:: _static/img/hflip.png
Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input: input tensor.
Returns:
The horizontally flipped image tensor.
"""
w = input.shape[-1]
return input[..., torch.arange(w - 1, -1, -1, device=input.device)]
class Hflip(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images.
Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input: input tensor.
Returns:
The horizontally flipped image tensor.
Examples:
>>> hflip = Hflip()
>>> input = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 1.]
... ]]])
>>> hflip(input)
tensor([[[[0., 0., 0.],
[0., 0., 0.],
[1., 1., 0.]]]])
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return hflip(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
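# Horizontal flip specialised to width 4: output column x0 of every row is read from input
# column (3 - x0), reversing the last dimension.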
@triton.jit
def triton_poi_fused_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + -1 * x0 + 4 * x1), xmask, eviction_policy
='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hflip(input: 'torch.Tensor') ->torch.Tensor:
"""Horizontally flip a tensor image or a batch of tensor images.
.. image:: _static/img/hflip.png
Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input: input tensor.
Returns:
The horizontally flipped image tensor.
"""
w = input.shape[-1]
return input[..., torch.arange(w - 1, -1, -1, device=input.device)]
class HflipNew(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images.
Input must be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input: input tensor.
Returns:
The horizontally flipped image tensor.
Examples:
>>> hflip = Hflip()
>>> input = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 1.]
... ]]])
>>> hflip(input)
tensor([[[[0., 0., 0.],
[0., 0., 0.],
[1., 1., 0.]]]])
"""
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
EStorm21/kornia
|
Hflip
| false
| 386
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
b2bba7950d748ba0b8ce0cc68035a248799a1044
|
https://github.com/EStorm21/kornia/tree/b2bba7950d748ba0b8ce0cc68035a248799a1044
|
normalize
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
from torch.nn.init import *
class normalize(nn.Module):
def __init__(self):
super(normalize, self).__init__()
def forward(self, x):
x = F.normalize(x, p=2, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
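# F.normalize(x, p=2, dim=1) specialised to shape (4, 4, 4, 4): the four dim-1 slices
# (offsets 0/16/32/48 within each 64-element batch) are squared and summed, the norm is
# clamped to a minimum of 1e-12, and every element is divided by it.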
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class normalizeNew(nn.Module):
def __init__(self):
super(normalizeNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
EVA4-RS-Group/Phase2
|
normalize
| false
| 387
|
[
"Apache-2.0"
] | 0
|
7c551e3894979cc425dd51baeddbfa5a51b7878d
|
https://github.com/EVA4-RS-Group/Phase2/tree/7c551e3894979cc425dd51baeddbfa5a51b7878d
|
AuxsiameseMLP
|
import torch
from torch import nn
from torch.nn import functional as F
class AuxsiameseMLP(nn.Module):
"""
    Basic structure similar to the MLP.
    The input is split into two 1*14*14 images that are trained separately and share the same parameters.
    Softmax for the auxiliary output layers.
"""
def __init__(self):
super(AuxsiameseMLP, self).__init__()
self.fc1 = nn.Linear(196, 160)
self.fc2 = nn.Linear(160, 10)
self.fc3 = nn.Linear(20, 2)
def forward(self, x1, x2):
x1 = x1.view(-1, 196)
x1 = F.relu(self.fc1(x1))
x1 = self.fc2(x1)
x2 = x2.view(-1, 196)
x2 = F.relu(self.fc1(x2))
x2 = self.fc2(x2)
x = torch.cat([x1, x2], dim=1)
x = F.relu(x)
x = torch.sigmoid(self.fc3(x))
return x, x1, x2
def get_inputs():
return [torch.rand([4, 196]), torch.rand([4, 196])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (10 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp9 = tl.load(in_ptr1 + (10 * x1 + (-10 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
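# Both branches share fc1/fc2: the two 4x196 inputs are multiplied by the same fc1 weight,
# triton_poi_fused_relu_0 adds the shared bias and applies ReLU to both activations in one
# launch, fc2 is applied via addmm, triton_poi_fused_cat_relu_1 fuses the concatenation of
# the two 10-dim outputs with the ReLU, and triton_poi_fused_sigmoid_2 adds the fc3 bias
# and applies the sigmoid.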
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 196), (196, 1))
assert_size_stride(primals_2, (160, 196), (196, 1))
assert_size_stride(primals_3, (160,), (1,))
assert_size_stride(primals_4, (10, 160), (160, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (4, 196), (196, 1))
assert_size_stride(primals_7, (2, 20), (20, 1))
assert_size_stride(primals_8, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (196,
160), (1, 196), 0), out=buf0)
buf3 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_6, reinterpret_tensor(primals_2, (196,
160), (1, 196), 0), out=buf3)
del primals_2
buf1 = buf0
del buf0
buf4 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused_relu_0[grid(640)](buf1, buf4, primals_3, 640,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(160, 10), (1, 160), 0), alpha=1, beta=1, out=buf2)
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf4, reinterpret_tensor(primals_4,
(160, 10), (1, 160), 0), alpha=1, beta=1, out=buf5)
del primals_5
buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
triton_poi_fused_cat_relu_1[grid(80)](buf2, buf5, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_7, (20, 2), (1,
20), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(8)](buf8, primals_8, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_8
return (buf8, buf2, buf5, primals_1, buf1, primals_6, buf4, buf6, buf8,
primals_7, primals_4)
class AuxsiameseMLPNew(nn.Module):
"""
    Basic structure similar to the MLP.
    The input is split into two 1*14*14 images that are trained separately and share the same parameters.
    Softmax for the auxiliary output layers.
"""
def __init__(self):
super(AuxsiameseMLPNew, self).__init__()
self.fc1 = nn.Linear(196, 160)
self.fc2 = nn.Linear(160, 10)
self.fc3 = nn.Linear(20, 2)
def forward(self, input_0, input_1):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
|
EE559DeepLearningEPFL/Project1
|
AuxsiameseMLP
| false
| 388
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
SevenLayerFC_Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SevenLayerFC_Net(nn.Module):
def __init__(self, D_in, H, D_out):
"""
        In the constructor we instantiate six nn.Linear modules and assign them as
member variables.
"""
super(SevenLayerFC_Net, self).__init__()
self.linear1 = torch.nn.Linear(D_in, 2048)
self.linear2 = torch.nn.Linear(2048, 1024)
self.linear3 = torch.nn.Linear(1024, 512)
self.linear4 = torch.nn.Linear(512, 256)
self.linear5 = torch.nn.Linear(256, 128)
self.linear6 = torch.nn.Linear(128, D_out)
def forward(self, x):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Tensors.
"""
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = F.relu(self.linear4(x))
x = F.relu(self.linear5(x))
x = self.linear6(x)
return F.log_softmax(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'H': 4, 'D_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
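# Each of the five fused ReLU kernels below adds the layer bias in place and additionally
# stores a boolean (activation <= 0) mask that autograd reuses in the backward pass; they
# differ only in the hidden width (2048 / 1024 / 512 / 256 / 128).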
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
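# log_softmax over dim 1 (the implicit default of F.log_softmax on a 4-D input without an
# explicit dim) is split into two passes: the first subtracts the maximum over the 4 dim-1
# elements, the second subtracts log(sum(exp(.))).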
@triton.jit
def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (2048, 4), (4, 1))
assert_size_stride(primals_2, (2048,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1024, 2048), (2048, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (512, 1024), (1024, 1))
assert_size_stride(primals_7, (512,), (1,))
assert_size_stride(primals_8, (256, 512), (512, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256), (256, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1,
primals_2, buf17, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(primals_4, (2048, 1024), (1, 2048), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096, 1024,
1), 0)
del buf2
buf16 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(65536)](buf3,
primals_5, buf16, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), reinterpret_tensor(primals_6, (1024, 512), (1, 1024), 0),
out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf4
buf15 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(32768)](buf5,
primals_7, buf15, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 512), (512, 1), 0),
reinterpret_tensor(primals_8, (512, 256), (1, 512), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf6
buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(16384)](buf7,
primals_9, buf14, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_10, (256, 128), (1, 256), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf8
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(8192)](buf9,
primals_11, buf13, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_5[grid(256)](buf10, buf11, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
triton_poi_fused__log_softmax_6[grid(256)](buf11, buf12, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf11
return (buf12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0),
reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0),
reinterpret_tensor(buf5, (64, 512), (512, 1), 0),
reinterpret_tensor(buf7, (64, 256), (256, 1), 0),
reinterpret_tensor(buf9, (64, 128), (128, 1), 0), buf12, primals_12,
buf13, primals_10, buf14, primals_8, buf15, primals_6, buf16,
primals_4, buf17)
class SevenLayerFC_NetNew(nn.Module):
def __init__(self, D_in, H, D_out):
"""
        In the constructor we instantiate six nn.Linear modules and assign them as
member variables.
"""
super(SevenLayerFC_NetNew, self).__init__()
self.linear1 = torch.nn.Linear(D_in, 2048)
self.linear2 = torch.nn.Linear(2048, 1024)
self.linear3 = torch.nn.Linear(1024, 512)
self.linear4 = torch.nn.Linear(512, 256)
self.linear5 = torch.nn.Linear(256, 128)
self.linear6 = torch.nn.Linear(128, D_out)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_10 = self.linear5.weight
primals_11 = self.linear5.bias
primals_12 = self.linear6.weight
primals_13 = self.linear6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
Darkhunter9/10-605_cifar100
|
SevenLayerFC_Net
| false
| 389
|
[
"MIT"
] | 0
|
97c171c7b3ad74bcb0376c2ec23083f1a0b9417d
|
https://github.com/Darkhunter9/10-605_cifar100/tree/97c171c7b3ad74bcb0376c2ec23083f1a0b9417d
|
JointsMSELoss
|
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
class JointsMSELoss(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELoss, self).__init__()
self.criterion = nn.MSELoss(size_average=True)
self.use_target_weight = use_target_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1
)
heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
loss = 0
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if self.use_target_weight:
loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight
[:, idx]), heatmap_gt.mul(target_weight[:, idx]))
else:
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
return loss / num_joints
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
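# Unrolled over the 4 joints: each joint column of the prediction and target is multiplied
# by its target weight, the squared difference is averaged over the batch (sum / 4.0),
# scaled by 0.5, and the accumulated total is divided by num_joints (the final * 0.25).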
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp12 = tmp10 * tmp11
tmp14 = tmp13 * tmp11
tmp15 = tmp12 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp22 = tmp20 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp22 - tmp24
tmp26 = tmp25 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp32 = tmp30 * tmp31
tmp34 = tmp33 * tmp31
tmp35 = tmp32 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.sum(tmp37, 1)[:, None]
tmp40 = 4.0
tmp41 = tmp9 / tmp40
tmp42 = 0.5
tmp43 = tmp41 * tmp42
tmp44 = 0.0
tmp45 = tmp43 + tmp44
tmp46 = tmp19 / tmp40
tmp47 = tmp46 * tmp42
tmp48 = tmp45 + tmp47
tmp49 = tmp29 / tmp40
tmp50 = tmp49 * tmp42
tmp51 = tmp48 + tmp50
tmp52 = tmp39 / tmp40
tmp53 = tmp52 * tmp42
tmp54 = tmp51 + tmp53
tmp55 = 0.25
tmp56 = tmp54 * tmp55
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf4, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf4,
class JointsMSELossNew(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELossNew, self).__init__()
self.criterion = nn.MSELoss(size_average=True)
self.use_target_weight = use_target_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
EVA4-RS-Group/Phase2
|
JointsMSELoss
| false
| 390
|
[
"Apache-2.0"
] | 0
|
7c551e3894979cc425dd51baeddbfa5a51b7878d
|
https://github.com/EVA4-RS-Group/Phase2/tree/7c551e3894979cc425dd51baeddbfa5a51b7878d
|
SiameseMLP
|
import torch
from torch import nn
from torch.nn import functional as F
class SiameseMLP(nn.Module):
"""
basic structure similar to the MLP
    input is split into two 1*14*14 images for separate training; the two branches share the same parameters
"""
def __init__(self):
super(SiameseMLP, self).__init__()
self.fc1 = nn.Linear(196, 160)
self.fc2 = nn.Linear(160, 10)
self.fc3 = nn.Linear(20, 2)
def forward(self, x1, x2):
x1 = x1.view(-1, 196)
x1 = F.relu(self.fc1(x1))
x1 = self.fc2(x1)
x1 = F.relu(x1)
x2 = x2.view(-1, 196)
x2 = F.relu(self.fc1(x2))
x2 = self.fc2(x2)
x2 = F.relu(x2)
x = torch.cat([x1, x2], dim=1)
x = torch.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 196]), torch.rand([4, 196])]
def get_init_inputs():
return [[], {}]
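# Hedged usage sketch (added for illustration, not part of the original
# module): feeds the two flattened 196-d half-images through the shared
# branch and prints the 2-way sigmoid output shape.
if __name__ == '__main__':
    model = SiameseMLP()
    x1, x2 = get_inputs()
    out = model(x1, x2)
    print(out.shape)  # torch.Size([4, 2])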
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (10 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp15 = tl.load(in_ptr2 + (10 * x1 + (-10 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (-10 + x0), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp10 = tmp9 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 196), (196, 1))
assert_size_stride(primals_2, (160, 196), (196, 1))
assert_size_stride(primals_3, (160,), (1,))
assert_size_stride(primals_4, (10, 160), (160, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (4, 196), (196, 1))
assert_size_stride(primals_7, (2, 20), (20, 1))
assert_size_stride(primals_8, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (196,
160), (1, 196), 0), out=buf0)
buf3 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_6, reinterpret_tensor(primals_2, (196,
160), (1, 196), 0), out=buf3)
del primals_2
buf1 = buf0
del buf0
buf4 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused_relu_0[grid(640)](buf1, buf4, primals_3, 640,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (160, 10), (1,
160), 0), out=buf2)
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (160, 10), (1,
160), 0), out=buf5)
buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
triton_poi_fused_cat_1[grid(80)](buf2, primals_5, buf5, buf6, 80,
XBLOCK=128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_7, (20, 2), (1,
20), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(8)](buf8, primals_8, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_8
buf9 = empty_strided_cuda((4, 10), (10, 1), torch.bool)
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(40)](buf5,
primals_5, buf2, buf9, buf10, 40, XBLOCK=64, num_warps=1,
num_stages=1)
del buf2
del buf5
del primals_5
return (buf8, primals_1, buf1, primals_6, buf4, buf6, buf8, primals_7,
buf9, primals_4, buf10)
class SiameseMLPNew(nn.Module):
"""
basic structure similar to the MLP
    input is split into two 1*14*14 images for separate training; the two branches share the same parameters
"""
def __init__(self):
super(SiameseMLPNew, self).__init__()
self.fc1 = nn.Linear(196, 160)
self.fc2 = nn.Linear(160, 10)
self.fc3 = nn.Linear(20, 2)
def forward(self, input_0, input_1):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
EE559DeepLearningEPFL/Project1
|
SiameseMLP
| false
| 391
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
SingleKaistAutoEncoder
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.kaiming_uniform_(module.weight, mode=mode, nonlinearity=
nonlinearity)
else:
nn.init.kaiming_normal_(module.weight, mode=mode, nonlinearity=
nonlinearity)
if hasattr(module, 'bias'):
nn.init.constant_(module.bias, bias)
class SingleKaistAutoEncoder(nn.Module):
def __init__(self):
super(SingleKaistAutoEncoder, self).__init__()
self.conv1_rgb = nn.Conv2d(3, 32, 5, 4, 2)
self.conv2_rgb = nn.Conv2d(32, 64, 3, 2, 1)
self.conv3_rgb = nn.Conv2d(64, 256, 3, 2, 1)
self.de_conv1_rgb = nn.ConvTranspose2d(256, 64, kernel_size=(4, 4),
stride=(2, 2), padding=(1, 1))
self.de_conv2_rgb = nn.ConvTranspose2d(64, 32, kernel_size=(4, 4),
stride=(2, 2), padding=(1, 1))
self.de_conv3_rgb = nn.ConvTranspose2d(32, 3, kernel_size=(6, 6),
stride=(4, 4), padding=(1, 1))
def bilinear_kernel(self, in_channels, out_channels, kernel_size):
"""
return a bilinear filter tensor
"""
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center
) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size,
kernel_size), dtype='float32')
for i in range(in_channels):
for j in range(out_channels):
weight[i, j, :, :] = filt
return torch.from_numpy(weight)
def init_weights(self):
kaiming_init(self.conv1_rgb)
kaiming_init(self.conv2_rgb)
kaiming_init(self.conv3_rgb)
self.de_conv1_rgb.weight.data = self.bilinear_kernel(self.
de_conv1_rgb.in_channels, self.de_conv1_rgb.out_channels, 4)
self.de_conv2_rgb.weight.data = self.bilinear_kernel(self.
de_conv2_rgb.in_channels, self.de_conv2_rgb.out_channels, 4)
self.de_conv3_rgb.weight.data = self.bilinear_kernel(self.
de_conv3_rgb.in_channels, self.de_conv3_rgb.out_channels, 6)
def forward(self, img_rgb):
rgb_down_2x = F.relu(self.conv1_rgb(img_rgb), True)
rgb_down_4x = F.relu(self.conv2_rgb(rgb_down_2x), True)
code = F.relu(self.conv3_rgb(rgb_down_4x), True)
rgb_up_2x = F.relu(self.de_conv1_rgb(code), True)
rgb_up_4x = F.relu(self.de_conv2_rgb(rgb_up_2x), True)
decode = torch.tanh(self.de_conv3_rgb(rgb_up_4x))
return code, decode
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
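# Hedged usage sketch (added for illustration, not part of the original
# module): initialises the encoder with Kaiming weights and the decoder with
# bilinear kernels, then checks the code/decode shapes on a random RGB batch.
if __name__ == '__main__':
    model = SingleKaistAutoEncoder()
    model.init_weights()
    img_rgb, = get_inputs()
    code, decode = model(img_rgb)
    print(code.shape, decode.shape)  # (4, 256, 4, 4) and (4, 3, 64, 64)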
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 108 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 4096 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 4096 * y1), tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_tanh_10(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 3, 6, 6), (108, 36, 6, 1))
assert_size_stride(primals_13, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 5, 5), (75, 1, 15, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 25)](primals_1, buf0, 96, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(16384, 9)](primals_6, buf3, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_4[grid(16384, 16)](primals_8, buf4, 16384, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.
float32)
triton_poi_fused_5[grid(2048, 16)](primals_10, buf5, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((32, 3, 6, 6), (108, 1, 18, 3), torch.float32
)
triton_poi_fused_6[grid(96, 36)](primals_12, buf6, 96, 36, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_12
buf7 = extern_kernels.convolution(buf1, buf0, stride=(4, 4),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 16, 16), (8192, 1, 512, 32))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_7[grid(32768)](buf8, primals_2,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf9 = extern_kernels.convolution(buf8, buf2, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 8, 8), (4096, 1, 512, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_8[grid(16384)](buf10, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf11 = extern_kernels.convolution(buf10, buf3, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf12 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
buf13 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.float32)
triton_poi_fused_convolution_relu_9[grid(1024, 16)](buf11,
primals_7, buf12, buf13, 1024, 16, XBLOCK=16, YBLOCK=16,
num_warps=4, num_stages=1)
del buf11
del primals_7
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 8, 8), (4096, 1, 512, 64))
del buf13
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_8[grid(16384)](buf15, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf16 = extern_kernels.convolution(buf15, buf5, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 32, 16, 16), (8192, 1, 512, 32))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_7[grid(32768)](buf17, primals_11,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf18 = extern_kernels.convolution(buf17, buf6, stride=(4, 4),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 3, 64, 64), (12288, 1, 192, 3))
buf19 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_tanh_10[grid(12, 4096)](buf18,
primals_13, buf19, 12, 4096, XBLOCK=512, YBLOCK=1, num_warps=4,
num_stages=1)
del buf18
del primals_13
return (buf12, buf19, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf8,
buf10, buf12, buf15, buf17, buf19)
def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.kaiming_uniform_(module.weight, mode=mode, nonlinearity=
nonlinearity)
else:
nn.init.kaiming_normal_(module.weight, mode=mode, nonlinearity=
nonlinearity)
if hasattr(module, 'bias'):
nn.init.constant_(module.bias, bias)
class SingleKaistAutoEncoderNew(nn.Module):
def __init__(self):
super(SingleKaistAutoEncoderNew, self).__init__()
self.conv1_rgb = nn.Conv2d(3, 32, 5, 4, 2)
self.conv2_rgb = nn.Conv2d(32, 64, 3, 2, 1)
self.conv3_rgb = nn.Conv2d(64, 256, 3, 2, 1)
self.de_conv1_rgb = nn.ConvTranspose2d(256, 64, kernel_size=(4, 4),
stride=(2, 2), padding=(1, 1))
self.de_conv2_rgb = nn.ConvTranspose2d(64, 32, kernel_size=(4, 4),
stride=(2, 2), padding=(1, 1))
self.de_conv3_rgb = nn.ConvTranspose2d(32, 3, kernel_size=(6, 6),
stride=(4, 4), padding=(1, 1))
def bilinear_kernel(self, in_channels, out_channels, kernel_size):
"""
return a bilinear filter tensor
"""
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center
) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size,
kernel_size), dtype='float32')
for i in range(in_channels):
for j in range(out_channels):
weight[i, j, :, :] = filt
return torch.from_numpy(weight)
def init_weights(self):
kaiming_init(self.conv1_rgb)
kaiming_init(self.conv2_rgb)
kaiming_init(self.conv3_rgb)
self.de_conv1_rgb.weight.data = self.bilinear_kernel(self.
de_conv1_rgb.in_channels, self.de_conv1_rgb.out_channels, 4)
self.de_conv2_rgb.weight.data = self.bilinear_kernel(self.
de_conv2_rgb.in_channels, self.de_conv2_rgb.out_channels, 4)
self.de_conv3_rgb.weight.data = self.bilinear_kernel(self.
de_conv3_rgb.in_channels, self.de_conv3_rgb.out_channels, 6)
def forward(self, input_0):
primals_1 = self.conv1_rgb.weight
primals_2 = self.conv1_rgb.bias
primals_4 = self.conv2_rgb.weight
primals_5 = self.conv2_rgb.bias
primals_6 = self.conv3_rgb.weight
primals_7 = self.conv3_rgb.bias
primals_8 = self.de_conv1_rgb.weight
primals_9 = self.de_conv1_rgb.bias
primals_10 = self.de_conv2_rgb.weight
primals_11 = self.de_conv2_rgb.bias
primals_12 = self.de_conv3_rgb.weight
primals_13 = self.de_conv3_rgb.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
DouCir/mmdetection
|
SingleKaistAutoEncoder
| false
| 392
|
[
"Apache-2.0"
] | 0
|
44613202c379d85315ed47ca670fd9853f90c3a5
|
https://github.com/DouCir/mmdetection/tree/44613202c379d85315ed47ca670fd9853f90c3a5
|
EPE
|
import torch
import torch.nn as nn
class EPE(nn.Module):
def __init__(self):
super(EPE, self).__init__()
def forward(self, flow, gt, loss_mask):
loss_map = (flow - gt.detach()) ** 2
loss_map = (loss_map.sum(1, True) + 1e-06) ** 0.5
return loss_map * loss_mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
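# Hedged usage sketch (added for illustration, not part of the original
# module): per-pixel endpoint error, masked; broadcasting restores the full
# (N, C, H, W) shape of loss_mask.
if __name__ == '__main__':
    epe = EPE()
    flow, gt, loss_mask = get_inputs()
    loss_map = epe(flow, gt, loss_mask)
    print(loss_map.shape)  # torch.Size([4, 4, 4, 4])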
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1e-06
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sub_sum_0[grid(64)](arg1_1, arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_sub_sum_1[grid(256)](buf0, arg2_1,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del buf0
return buf1,
class EPENew(nn.Module):
def __init__(self):
super(EPENew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Egazaga/arXiv2020-RIFE
|
EPE
| false
| 393
|
[
"MIT"
] | 0
|
84b050e168a682905a9dde8aa15437a4994f2abf
|
https://github.com/Egazaga/arXiv2020-RIFE/tree/84b050e168a682905a9dde8aa15437a4994f2abf
|
BiencoderLoss
|
import torch
import torch.nn as nn
from torch import Tensor as T
import torch.nn.functional as F
class BiencoderLoss(nn.Module):
def __init__(self):
super(BiencoderLoss, self).__init__()
def forward(self, q_vectors: 'T', p_vectors: 'T'):
score_matrix = torch.mm(q_vectors, torch.transpose(p_vectors, 0, 1))
score_softmax = F.softmax(score_matrix, dim=1)
scores = torch.diag(score_softmax)
loss = torch.mean(torch.neg(torch.log(scores)))
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
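# Hedged usage sketch (added for illustration, not part of the original
# module): in-batch softmax over the question/passage similarity matrix,
# averaging -log of the diagonal (matched-pair) scores.
if __name__ == '__main__':
    loss_fn = BiencoderLoss()
    q_vectors, p_vectors = get_inputs()
    print(loss_fn(q_vectors, p_vectors).item())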
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_per_fused__softmax_diagonal_copy_log_mean_neg_1(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tl_math.log(tmp8)
tmp10 = -tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 4.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__softmax_diagonal_copy_log_mean_neg_1[grid(1)](buf3,
buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf3,
class BiencoderLossNew(nn.Module):
def __init__(self):
super(BiencoderLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Eric-Yang-yz/dense_passage_retrieval
|
BiencoderLoss
| false
| 394
|
[
"MIT"
] | 0
|
5d6e210d5ff3304b4891c94edd2bd2aee5b34655
|
https://github.com/Eric-Yang-yz/dense_passage_retrieval/tree/5d6e210d5ff3304b4891c94edd2bd2aee5b34655
|
Attention
|
import math
import torch
import torch.nn as nn
class Attention(nn.Module):
"""
Attention mechanism (Luong)
"""
def __init__(self, hidden_size, hidden_size1):
super(Attention, self).__init__()
self.W_h = nn.Linear(hidden_size + hidden_size1, hidden_size, bias=
False)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
self.epsilon = 1e-10
nn.init.xavier_normal_(self.W_h.weight)
def forward(self, encoder_outputs, decoder_hidden, inp_mask):
seq_len = encoder_outputs.size(1)
H = decoder_hidden.repeat(seq_len, 1, 1).transpose(0, 1)
energy = torch.tanh(self.W_h(torch.cat([H, encoder_outputs], 2)))
energy = energy.transpose(2, 1)
v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)
energy = torch.bmm(v, energy).view(-1, seq_len)
a = torch.softmax(energy, dim=-1) * inp_mask
normalization_factor = a.sum(1, keepdim=True)
a = a / (normalization_factor + self.epsilon)
a = a.unsqueeze(1)
context = a.bmm(encoder_outputs)
return a, context
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'hidden_size1': 4}]
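# Hedged usage sketch (added for illustration, not part of the original
# module): one Luong attention step over a length-4 encoder sequence.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    attn = Attention(*init_args, **init_kwargs)
    encoder_outputs, decoder_hidden, inp_mask = get_inputs()
    a, context = attn(encoder_outputs, decoder_hidden, inp_mask)
    print(a.shape, context.shape)  # (4, 1, 4) and (4, 1, 4)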
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = xindex // 32
x3 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_sum_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 1e-10
tmp9 = tmp7 + tmp8
tmp10 = tmp0 / tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0
), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_mul_4[grid(16)](buf5, primals_5, buf6, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_add_div_sum_5[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 1, 4), (4, 0, 1), 0
), primals_1, out=buf8)
return reinterpret_tensor(buf7, (4, 1, 4), (4, 4, 1), 0
), buf8, primals_5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf2, buf4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0)
class AttentionNew(nn.Module):
"""
Attention mechanism (Luong)
"""
def __init__(self, hidden_size, hidden_size1):
super(AttentionNew, self).__init__()
self.W_h = nn.Linear(hidden_size + hidden_size1, hidden_size, bias=
False)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
self.epsilon = 1e-10
nn.init.xavier_normal_(self.W_h.weight)
def forward(self, input_0, input_1, input_2):
primals_4 = self.v
primals_3 = self.W_h.weight
primals_1 = input_0
primals_2 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
DeepInEvil/kgirnet
|
Attention
| false
| 395
|
[
"MIT"
] | 0
|
81210907daba7671d3f3fd5814656d1557b7a2b1
|
https://github.com/DeepInEvil/kgirnet/tree/81210907daba7671d3f3fd5814656d1557b7a2b1
|
FocalLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2, logits=False, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.bce = (logits and F.binary_cross_entropy_with_logits or F.
binary_cross_entropy)
self.reduce = reduce
def forward(self, inputs, targets):
bce = self.bce(inputs, targets, reduction='none')
pt = torch.exp(-bce)
focal_weight = self.alpha * torch.pow(1 - pt, self.gamma)
focal_loss = focal_weight * bce
if self.reduce:
focal_loss = torch.mean(focal_loss)
return focal_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
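# Hedged usage sketch (added for illustration, not part of the original
# module): with logits=False the plain binary_cross_entropy path is used, so
# inputs must already be probabilities in [0, 1]; rand() satisfies that.
if __name__ == '__main__':
    criterion = FocalLoss(alpha=0.25, gamma=2)
    inputs, targets = get_inputs()
    print(criterion(inputs, targets).item())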
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_exp_mean_mul_neg_pow_rsub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp1 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tmp19 = tmp18 * tmp12
tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
tmp23 = 256.0
tmp24 = tmp22 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_exp_mean_mul_neg_pow_rsub_0[grid
(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(nn.Module):
def __init__(self, alpha=0.25, gamma=2, logits=False, reduce=True):
super(FocalLossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.bce = (logits and F.binary_cross_entropy_with_logits or F.
binary_cross_entropy)
self.reduce = reduce
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EdwardTyantov/siim-isic-melanoma-2020
|
FocalLoss
| false
| 396
|
[
"MIT"
] | 0
|
ce0ba286244dcf7b5ccb8250505c80350efb0301
|
https://github.com/EdwardTyantov/siim-isic-melanoma-2020/tree/ce0ba286244dcf7b5ccb8250505c80350efb0301
|
AuxMLP
|
import torch
from torch import nn
from torch.nn import functional as F
class AuxMLP(nn.Module):
"""
basic structure similar to the MLP
    input is split into two 1*14*14 images for separate training, using different parameters per branch
softmax for the auxiliary output layers
"""
def __init__(self):
super(AuxMLP, self).__init__()
self.fc11 = nn.Linear(196, 160)
self.fc12 = nn.Linear(196, 160)
self.fc21 = nn.Linear(160, 10)
self.fc22 = nn.Linear(160, 10)
self.fc3 = nn.Linear(20, 2)
def forward(self, x1, x2):
x1 = x1.view(-1, 196)
x1 = F.relu(self.fc11(x1))
x1 = self.fc21(x1)
x2 = x2.view(-1, 196)
x2 = F.relu(self.fc12(x2))
x2 = self.fc22(x2)
x = torch.cat([x1, x2], dim=1)
x = F.relu(x)
x = torch.sigmoid(self.fc3(x))
return x, x1, x2
def get_inputs():
return [torch.rand([4, 196]), torch.rand([4, 196])]
def get_init_inputs():
return [[], {}]
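# Hedged usage sketch (added for illustration, not part of the original
# module): the fused 2-way output plus the two 10-way auxiliary heads.
if __name__ == '__main__':
    model = AuxMLP()
    x1, x2 = get_inputs()
    out, aux1, aux2 = model(x1, x2)
    print(out.shape, aux1.shape, aux2.shape)  # (4, 2), (4, 10), (4, 10)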
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (10 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp9 = tl.load(in_ptr1 + (10 * x1 + (-10 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 196), (196, 1))
assert_size_stride(primals_2, (160, 196), (196, 1))
assert_size_stride(primals_3, (160,), (1,))
assert_size_stride(primals_4, (10, 160), (160, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (4, 196), (196, 1))
assert_size_stride(primals_7, (160, 196), (196, 1))
assert_size_stride(primals_8, (160,), (1,))
assert_size_stride(primals_9, (10, 160), (160, 1))
assert_size_stride(primals_10, (10,), (1,))
assert_size_stride(primals_11, (2, 20), (20, 1))
assert_size_stride(primals_12, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (196,
160), (1, 196), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(640)](buf1, primals_3, 640, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(160, 10), (1, 160), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 160), (160, 1), torch.float32)
extern_kernels.mm(primals_6, reinterpret_tensor(primals_7, (196,
160), (1, 196), 0), out=buf3)
del primals_7
buf4 = buf3
del buf3
triton_poi_fused_relu_0[grid(640)](buf4, primals_8, 640, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9,
(160, 10), (1, 160), 0), alpha=1, beta=1, out=buf5)
del primals_10
buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
triton_poi_fused_cat_relu_1[grid(80)](buf2, buf5, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_11, (20, 2), (1,
20), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(8)](buf8, primals_12, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_12
return (buf8, buf2, buf5, primals_1, buf1, primals_6, buf4, buf6, buf8,
primals_11, primals_9, primals_4)
class AuxMLPNew(nn.Module):
"""
basic structure similar to the MLP
    input is split into two 1*14*14 images for separate training, using different parameters per branch
softmax for the auxiliary output layers
"""
def __init__(self):
super(AuxMLPNew, self).__init__()
self.fc11 = nn.Linear(196, 160)
self.fc12 = nn.Linear(196, 160)
self.fc21 = nn.Linear(160, 10)
self.fc22 = nn.Linear(160, 10)
self.fc3 = nn.Linear(20, 2)
def forward(self, input_0, input_1):
primals_2 = self.fc11.weight
primals_3 = self.fc11.bias
primals_7 = self.fc12.weight
primals_8 = self.fc12.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_9 = self.fc22.weight
primals_10 = self.fc22.bias
primals_11 = self.fc3.weight
primals_12 = self.fc3.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1], output[2]
|
EE559DeepLearningEPFL/Project1
|
AuxMLP
| false
| 397
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
Homography
|
import torch
import torch.nn as nn
class Homography(nn.Module):
"""Homography geometric model to be used together with ImageRegistrator
module for the optimization-based image
registration."""
def __init__(self) ->None:
super().__init__()
self.model = nn.Parameter(torch.eye(3))
self.reset_model()
def __repr__(self) ->str:
return f'{self.__class__.__name__}({self.model})'
def reset_model(self):
"""Initializes the model with identity transform."""
torch.nn.init.eye_(self.model)
def forward(self) ->torch.Tensor:
"""Single-batch homography".
Returns:
Homography matrix with shape :math:`(1, 3, 3)`.
"""
return torch.unsqueeze(self.model / self.model[2, 2], dim=0)
def forward_inverse(self) ->torch.Tensor:
"""Interted Single-batch homography".
Returns:
Homography martix with shape :math:`(1, 3, 3)`.
"""
return torch.unsqueeze(torch.inverse(self.model), dim=0)
def get_inputs():
return []
def get_init_inputs():
return [[], {}]
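# Hedged usage sketch (added for illustration, not part of the original
# module): a freshly constructed model holds the identity transform, so the
# forward and inverse homographies coincide.
if __name__ == '__main__':
    h = Homography()
    print(h())                  # (1, 3, 3) identity homography
    print(h.forward_inverse())  # identity as well for the initial model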
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 9
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 8)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (3, 3), (3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((3, 3), (3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(9)](primals_1, buf0, 9, XBLOCK=16,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf0, (1, 3, 3), (9, 3, 1), 0), primals_1
class HomographyNew(nn.Module):
"""Homography geometric model to be used together with ImageRegistrator
module for the optimization-based image
registration."""
def __init__(self) ->None:
super().__init__()
self.model = nn.Parameter(torch.eye(3))
self.reset_model()
def __repr__(self) ->str:
return f'{self.__class__.__name__}({self.model})'
def reset_model(self):
"""Initializes the model with identity transform."""
torch.nn.init.eye_(self.model)
def forward_inverse(self) ->torch.Tensor:
"""Interted Single-batch homography".
Returns:
Homography martix with shape :math:`(1, 3, 3)`.
"""
return torch.unsqueeze(torch.inverse(self.model), dim=0)
def forward(self):
primals_1 = self.model
output = call([primals_1])
return output[0]
|
EStorm21/kornia
|
Homography
| false
| 398
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
b2bba7950d748ba0b8ce0cc68035a248799a1044
|
https://github.com/EStorm21/kornia/tree/b2bba7950d748ba0b8ce0cc68035a248799a1044
|
AuxCNN
|
import torch
from torch import nn
from torch.nn import functional as F
class AuxCNN(nn.Module):
"""
basic structure similar to the CNN
    input is split into two 1*14*14 images for separate training, using different parameters per branch
softmax for the auxiliary output layers
"""
def __init__(self):
super(AuxCNN, self).__init__()
self.conv11 = nn.Conv2d(1, 32, kernel_size=3)
self.conv21 = nn.Conv2d(32, 64, kernel_size=3)
self.fc11 = nn.Linear(256, 200)
self.fc21 = nn.Linear(200, 10)
self.conv12 = nn.Conv2d(1, 32, kernel_size=3)
self.conv22 = nn.Conv2d(32, 64, kernel_size=3)
self.fc12 = nn.Linear(256, 200)
self.fc22 = nn.Linear(200, 10)
self.fc3 = nn.Linear(20, 2)
    def convs(self, x):
        x = F.relu(F.max_pool2d(self.conv11(x), kernel_size=2))
        x = F.relu(F.max_pool2d(self.conv21(x), kernel_size=2))
        return x
def forward(self, x1, x2):
x1 = F.relu(F.max_pool2d(self.conv11(x1), kernel_size=2))
x1 = F.relu(F.max_pool2d(self.conv21(x1), kernel_size=2))
x1 = x1.view(-1, 256)
x1 = F.relu(self.fc11(x1))
x1 = self.fc21(x1)
x2 = F.relu(F.max_pool2d(self.conv12(x2), kernel_size=2))
x2 = F.relu(F.max_pool2d(self.conv22(x2), kernel_size=2))
x2 = x2.view(-1, 256)
x2 = F.relu(self.fc12(x2))
x2 = self.fc22(x2)
x = torch.cat([x1, x2], dim=1)
x = F.relu(x)
x = torch.sigmoid(self.fc3(x))
return x, x1, x2
def get_inputs():
return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
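# Hedged usage sketch (added for illustration, not part of the original
# module): note that the 64x64 tensors from get_inputs() are larger than the
# 1x14x14 images the docstring describes, so the flattened batch dimension
# grows accordingly after the .view(-1, 256) reshape.
if __name__ == '__main__':
    model = AuxCNN()
    x1, x2 = get_inputs()
    out, aux1, aux2 = model(x1, x2)
    print(out.shape, aux1.shape, aux2.shape)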
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x3 = xindex // 31
x2 = xindex // 30752
x4 = xindex % 30752
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 30848 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp18, xmask)
tl.store(out_ptr2 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_relu_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 3920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (10 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp9 = tl.load(in_ptr1 + (10 * x1 + (-10 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_sigmoid_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (200, 256), (256, 1))
assert_size_stride(primals_7, (200,), (1,))
assert_size_stride(primals_8, (10, 200), (200, 1))
assert_size_stride(primals_9, (10,), (1,))
assert_size_stride(primals_10, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_13, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_14, (64,), (1,))
assert_size_stride(primals_15, (200, 256), (256, 1))
assert_size_stride(primals_16, (200,), (1,))
assert_size_stride(primals_17, (10, 200), (200, 1))
assert_size_stride(primals_18, (10,), (1,))
assert_size_stride(primals_19, (2, 20), (20, 1))
assert_size_stride(primals_20, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(492032)](buf1, primals_2,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf1,
buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 29, 29), (53824, 841, 29, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(215296)](buf5, primals_5,
215296, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf26 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf5, buf6, buf7, buf26, 50176, XBLOCK=512, num_warps=
4, num_stages=1)
buf8 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (196, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 200), (1, 256), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(39200)](buf9, primals_7, 39200, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(200, 10), (1, 200), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf11 = extern_kernels.convolution(primals_12, primals_10, stride=(
1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_0[grid(492032)](buf12, primals_11,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf13 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf14 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf12,
buf13, buf14, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_13, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 29, 29), (53824, 841, 29, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(215296)](buf16, primals_14,
215296, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_14
buf17 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf18 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf25 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf16, buf17, buf18, buf25, 50176, XBLOCK=512,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (196, 256), (256, 1), 0
), reinterpret_tensor(primals_15, (256, 200), (1, 256), 0), out
=buf19)
buf20 = buf19
del buf19
triton_poi_fused_relu_4[grid(39200)](buf20, primals_16, 39200,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_16
buf21 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_18, buf20, reinterpret_tensor(
primals_17, (200, 10), (1, 200), 0), alpha=1, beta=1, out=buf21)
del primals_18
buf22 = empty_strided_cuda((196, 20), (20, 1), torch.float32)
triton_poi_fused_cat_relu_5[grid(3920)](buf10, buf21, buf22, 3920,
XBLOCK=256, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((196, 2), (2, 1), torch.float32)
extern_kernels.mm(buf22, reinterpret_tensor(primals_19, (20, 2), (1,
20), 0), out=buf23)
buf24 = buf23
del buf23
triton_poi_fused_sigmoid_6[grid(392)](buf24, primals_20, 392,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_20
return (buf24, buf10, buf21, primals_1, primals_3, primals_4,
primals_10, primals_12, primals_13, buf1, buf2, buf3, buf5, buf6,
reinterpret_tensor(buf7, (196, 256), (256, 1), 0), buf9, buf12,
buf13, buf14, buf16, buf17, reinterpret_tensor(buf18, (196, 256), (
256, 1), 0), buf20, buf22, buf24, primals_19, primals_17,
primals_15, buf25, primals_8, primals_6, buf26)
class AuxCNNNew(nn.Module):
"""
basic structure similar to the CNN
    input is split into two 1*14*14 images that are trained separately, using different parameters
softmax for the auxiliary output layers
"""
def __init__(self):
super(AuxCNNNew, self).__init__()
self.conv11 = nn.Conv2d(1, 32, kernel_size=3)
self.conv21 = nn.Conv2d(32, 64, kernel_size=3)
self.fc11 = nn.Linear(256, 200)
self.fc21 = nn.Linear(200, 10)
self.conv12 = nn.Conv2d(1, 32, kernel_size=3)
self.conv22 = nn.Conv2d(32, 64, kernel_size=3)
self.fc12 = nn.Linear(256, 200)
self.fc22 = nn.Linear(200, 10)
self.fc3 = nn.Linear(20, 2)
    def convs(self, x):
        # NOTE: unused helper; this class defines conv11/conv21 and conv12/conv22,
        # so the self.conv1/self.conv2 referenced here do not exist on AuxCNNNew.
        x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
        x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
        return x
def forward(self, input_0, input_1):
primals_1 = self.conv11.weight
primals_2 = self.conv11.bias
primals_4 = self.conv21.weight
primals_5 = self.conv21.bias
primals_6 = self.fc11.weight
primals_7 = self.fc11.bias
primals_8 = self.fc21.weight
primals_9 = self.fc21.bias
primals_10 = self.conv12.weight
primals_11 = self.conv12.bias
primals_13 = self.conv22.weight
primals_14 = self.conv22.bias
primals_15 = self.fc12.weight
primals_16 = self.fc12.bias
primals_17 = self.fc22.weight
primals_18 = self.fc22.bias
primals_19 = self.fc3.weight
primals_20 = self.fc3.bias
primals_3 = input_0
primals_12 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0], output[1], output[2]
|
EE559DeepLearningEPFL/Project1
|
AuxCNN
| false
| 399
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
AuxsiameseCNN
|
import torch
from torch import nn
from torch.nn import functional as F
class AuxsiameseCNN(nn.Module):
"""
basic structure similar to the CNN
    input is split into two 1*14*14 images that are trained separately, sharing the same parameters
softmax for the auxiliary output layers
"""
def __init__(self):
super(AuxsiameseCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.fc1 = nn.Linear(256, 200)
self.fc2 = nn.Linear(200, 10)
self.fc3 = nn.Linear(20, 2)
def convs(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
return x
def forward(self, x1, x2):
x1 = self.convs(x1)
x1 = x1.view(-1, 256)
x1 = F.relu(self.fc1(x1))
x1 = self.fc2(x1)
x2 = self.convs(x2)
x2 = x2.view(-1, 256)
x2 = F.relu(self.fc1(x2))
x2 = self.fc2(x2)
x = torch.cat([x1, x2], dim=1)
x = F.relu(x)
x = torch.sigmoid(self.fc3(x))
return x, x1, x2
def get_inputs():
return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
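# Minimal usage sketch (assumes the AuxsiameseCNN class and get_inputs defined
# above): both inputs go through the same conv1/conv2/fc1/fc2, so gradients
# from the two branches accumulate on a single set of shared weights.
model = AuxsiameseCNN()
a, b = get_inputs()
out, aux1, aux2 = model(a, b)
out.sum().backward()
print(out.shape, model.conv1.weight.grad.shape)  # (196, 2), (32, 1, 3, 3)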
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x3 = xindex // 31
x2 = xindex // 30752
x4 = xindex % 30752
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 30848 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp18, xmask)
tl.store(out_ptr2 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_relu_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 3920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (10 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp9 = tl.load(in_ptr1 + (10 * x1 + (-10 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_sigmoid_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (200, 256), (256, 1))
assert_size_stride(primals_7, (200,), (1,))
assert_size_stride(primals_8, (10, 200), (200, 1))
assert_size_stride(primals_9, (10,), (1,))
assert_size_stride(primals_10, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_11, (2, 20), (20, 1))
assert_size_stride(primals_12, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf11 = extern_kernels.convolution(primals_10, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
buf12 = buf11
del buf11
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(492032)](buf1, buf12, primals_2,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf1,
buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 29, 29), (53824, 841, 29, 1))
buf13 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf14 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf12,
buf13, buf14, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 29, 29), (53824, 841, 29, 1))
buf5 = buf4
del buf4
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(215296)](buf5, buf16, primals_5,
215296, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf26 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf5, buf6, buf7, buf26, 50176, XBLOCK=512, num_warps=
4, num_stages=1)
buf8 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (196, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 200), (1, 256), 0), out=buf8)
buf17 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf18 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf25 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf16, buf17, buf18, buf25, 50176, XBLOCK=512,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (196, 256), (256, 1), 0
), reinterpret_tensor(primals_6, (256, 200), (1, 256), 0), out=
buf19)
buf9 = buf8
del buf8
buf20 = buf19
del buf19
triton_poi_fused_relu_4[grid(39200)](buf9, buf20, primals_7, 39200,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(200, 10), (1, 200), 0), alpha=1, beta=1, out=buf10)
buf21 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf20, reinterpret_tensor(primals_8,
(200, 10), (1, 200), 0), alpha=1, beta=1, out=buf21)
del primals_9
buf22 = empty_strided_cuda((196, 20), (20, 1), torch.float32)
triton_poi_fused_cat_relu_5[grid(3920)](buf10, buf21, buf22, 3920,
XBLOCK=256, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((196, 2), (2, 1), torch.float32)
extern_kernels.mm(buf22, reinterpret_tensor(primals_11, (20, 2), (1,
20), 0), out=buf23)
buf24 = buf23
del buf23
triton_poi_fused_sigmoid_6[grid(392)](buf24, primals_12, 392,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
return (buf24, buf10, buf21, primals_1, primals_3, primals_4,
primals_10, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7,
(196, 256), (256, 1), 0), buf9, buf12, buf13, buf14, buf16, buf17,
reinterpret_tensor(buf18, (196, 256), (256, 1), 0), buf20, buf22,
buf24, primals_11, primals_8, primals_6, buf25, buf26)
class AuxsiameseCNNNew(nn.Module):
"""
basic structure similar to the CNN
    input is split into two 1*14*14 images that are trained separately, sharing the same parameters
softmax for the auxiliary output layers
"""
def __init__(self):
super(AuxsiameseCNNNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.fc1 = nn.Linear(256, 200)
self.fc2 = nn.Linear(200, 10)
self.fc3 = nn.Linear(20, 2)
def convs(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
return x
def forward(self, input_0, input_1):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_11 = self.fc3.weight
primals_12 = self.fc3.bias
primals_3 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1], output[2]
|
EE559DeepLearningEPFL/Project1
|
AuxsiameseCNN
| false
| 400
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
AddBias
|
import torch
import torch.nn as nn
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self.bias = bias
def forward(self, x):
return x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'bias': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AddBiasNew(nn.Module):
def __init__(self, bias):
super(AddBiasNew, self).__init__()
self.bias = bias
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
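# Note: because bias=4 was a plain Python number when this graph was captured,
# the fused kernel above hard-codes the constant 4.0 and call() never reads
# self.bias, so AddBiasNew always computes x + 4 even if self.bias is changed
# later. A rough eager-mode equivalent of the compiled forward:
def add_bias_reference(x):
    return x + 4.0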
|
ErikML/convex_adversarial
|
AddBias
| false
| 401
|
[
"MIT"
] | 0
|
52652943cfdb54199b579dbe70d3be20d2a13f23
|
https://github.com/ErikML/convex_adversarial/tree/52652943cfdb54199b579dbe70d3be20d2a13f23
|
Discriminator
|
import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, n_h):
super(Discriminator, self).__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
c_x = torch.unsqueeze(c, 1)
c_x = c_x.expand_as(h_pl)
sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2)
sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2)
if s_bias1 is not None:
sc_1 += s_bias1
if s_bias2 is not None:
sc_2 += s_bias2
logits = torch.cat((sc_1, sc_2), 1)
return logits
def get_inputs():
return [torch.rand([4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_h': 4}]
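# Minimal usage sketch (assumes the Discriminator class and get_inputs defined
# above): the summary vector c is broadcast over the positive (h_pl) and
# negative (h_mi) embeddings, both are scored with the shared bilinear form
# f_k, and the two score matrices are concatenated along dim 1.
disc = Discriminator(n_h=4)
c, h_pl, h_mi = get_inputs()
logits = disc(c, h_pl, h_mi)
print(logits.shape)  # torch.Size([4, 8]) for the (4,), (4, 4, 4), (4, 4, 4) test inputs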
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tmp14 + tmp7
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp10, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_2, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(
buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
buf2 = buf1
del buf1
buf3 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_5, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(
buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](buf2, primals_4, buf4, buf5, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del buf2
del buf4
del primals_4
return buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_5, (16, 4), (4, 1), 0)
class DiscriminatorNew(nn.Module):
def __init__(self, n_h):
super(DiscriminatorNew, self).__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1, input_2):
primals_3 = self.f_k.weight
primals_4 = self.f_k.bias
primals_1 = input_0
primals_2 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
EchooooAi/gsrl
|
Discriminator
| false
| 402
|
[
"MIT"
] | 0
|
1cfc73fe052c09fe05df09c92c5232e7341e9ad0
|
https://github.com/EchooooAi/gsrl/tree/1cfc73fe052c09fe05df09c92c5232e7341e9ad0
|
Hsigmoid
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
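# The hard sigmoid above is relu6(x + 3) / 6, i.e. clamp((x + 3) / 6, 0, 1);
# the fused kernel below multiplies by 1/6 (0.16666...) instead of dividing.
# Rough eager-mode equivalent:
import torch

def hsigmoid_reference(x: torch.Tensor) -> torch.Tensor:
    return torch.clamp(x + 3.0, 0.0, 6.0) * (1.0 / 6.0)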
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
def __init__(self, inplace=True):
super(HsigmoidNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
EllisHui/outOfRailWay
|
Hsigmoid
| false
| 403
|
[
"BSD-2-Clause"
] | 0
|
e3bf9aaa18879bee5536740d55006c872f06278f
|
https://github.com/EllisHui/outOfRailWay/tree/e3bf9aaa18879bee5536740d55006c872f06278f
|
DistillKL
|
import torch
from torch import nn
import torch.nn.functional as F
class DistillKL(nn.Module):
"""KL divergence for distillation"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
loss = F.kl_div(p_s, p_t, size_average=False
) * self.T ** 2 / y_s.shape[0]
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
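# Worked eager-mode sketch of the loss above. With size_average=False,
# F.kl_div sums over all elements, so (assuming p_t > 0, which holds for
# softmax outputs):
#   loss = sum(p_t * (log p_t - log p_s)) * T**2 / batch_size
# where p_s = softmax(y_s / T) and p_t = softmax(y_t / T).
import torch
import torch.nn.functional as F

def distill_kl_reference(y_s: torch.Tensor, y_t: torch.Tensor, T: float) -> torch.Tensor:
    log_p_s = F.log_softmax(y_s / T, dim=1)
    p_t = F.softmax(y_t / T, dim=1)
    return (p_t * (p_t.log() - log_p_s)).sum() * T ** 2 / y_s.shape[0]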
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 16.0
tmp37 = tmp35 * tmp36
tmp38 = 0.25
tmp39 = tmp37 * tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class DistillKLNew(nn.Module):
"""KL divergence for distillation"""
def __init__(self, T):
super(DistillKLNew, self).__init__()
self.T = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Electronicshelf/Few-shot-regularization
|
DistillKL
| false
| 404
|
[
"MIT"
] | 0
|
3fd0fef52684af77a5e574b5d61cfd8dd557b14b
|
https://github.com/Electronicshelf/Few-shot-regularization/tree/3fd0fef52684af77a5e574b5d61cfd8dd557b14b
|
Normalize
|
import torch
from torch import nn
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
out = x.div(norm)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
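# For the default power=2 the module above divides by the L2 norm over dim 1.
# This is close to F.normalize(x, p=2, dim=1), which additionally clamps the
# norm with a small eps; the version here divides by the raw norm. Rough
# eager-mode equivalent:
import torch

def l2_normalize_reference(x: torch.Tensor) -> torch.Tensor:
    return x / x.pow(2).sum(1, keepdim=True).sqrt()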
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
def __init__(self, power=2):
super(NormalizeNew, self).__init__()
self.power = power
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Electronicshelf/Few-shot-regularization
|
Normalize
| false
| 405
|
[
"MIT"
] | 0
|
3fd0fef52684af77a5e574b5d61cfd8dd557b14b
|
https://github.com/Electronicshelf/Few-shot-regularization/tree/3fd0fef52684af77a5e574b5d61cfd8dd557b14b
|
CNN
|
import torch
from torch import nn
from torch.nn import functional as F
class CNN(nn.Module):
"""
conv1, conv2
two convolution layers.
fc1, fc2
two fully connected layers.
fc3
output layer
relu
activation function for hidden layers
sigmoid
activation function for output layer
"""
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(2, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.fc1 = nn.Linear(256, 200)
self.fc2 = nn.Linear(200, 10)
self.fc3 = nn.Linear(10, 2)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
x = x.view(-1, 256)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = torch.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return [[], {}]
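# Minimal usage sketch (assumes the CNN class and get_inputs defined above).
# With the 2x64x64 test input the view(-1, 256) yields 196 rows for a 4-image
# batch, so the output is (196, 2); the fully connected sizes look chosen for
# much smaller feature maps.
model = CNN()
x, = get_inputs()
out = model(x)
print(out.shape)  # torch.Size([196, 2])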
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x3 = xindex // 31
x2 = xindex // 30752
x4 = xindex % 30752
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 30848 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp18, xmask)
tl.store(out_ptr2 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1960
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (200, 256), (256, 1))
assert_size_stride(primals_7, (200,), (1,))
assert_size_stride(primals_8, (10, 200), (200, 1))
assert_size_stride(primals_9, (10,), (1,))
assert_size_stride(primals_10, (2, 10), (10, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(492032)](buf1, primals_2,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf1,
buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 29, 29), (53824, 841, 29, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(215296)](buf5, primals_5,
215296, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf5, buf6, buf7, buf14, 50176, XBLOCK=512, num_warps=
4, num_stages=1)
buf8 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (196, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 200), (1, 256), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(39200)](buf9, primals_7, 39200, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (200, 10), (1,
200), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(1960)](buf11, primals_9, 1960, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((196, 2), (2, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (10, 2), (1,
10), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_sigmoid_6[grid(392)](buf13, primals_11, 392,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (196, 256), (256, 1), 0), buf9,
buf11, buf13, primals_10, primals_8, primals_6, buf14)
class CNNNew(nn.Module):
"""
conv1, conv2
two convolution layers.
fc1, fc2
two fully connected layers.
fc3
output layer
relu
activation function for hidden layers
sigmoid
activation function for output layer
"""
def __init__(self):
super(CNNNew, self).__init__()
self.conv1 = nn.Conv2d(2, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.fc1 = nn.Linear(256, 200)
self.fc2 = nn.Linear(200, 10)
self.fc3 = nn.Linear(10, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
EE559DeepLearningEPFL/Project1
|
CNN
| false
| 406
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
RegressionModel
|
import torch
import torch.nn as nn
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=
3, padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 4)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
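# Minimal usage sketch (assumes the RegressionModel class and get_inputs
# defined above): the 3x3 convolutions with padding 1 keep the spatial size,
# and the channels-last output is reshaped into one row of 4 box deltas per
# anchor, i.e. (N, H*W*num_anchors, 4).
head = RegressionModel(num_features_in=4)
x, = get_inputs()
out = head(x)
print(out.shape)  # torch.Size([4, 144, 4]) for a 4x4 feature map with 9 anchors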
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 576 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 36 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (36, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (36,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 36, 4, 4), (576, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.
float32)
buf10 = reinterpret_tensor(buf9, (4, 144, 4), (576, 4, 1), 0)
del buf9
triton_poi_fused_clone_view_1[grid(64, 36)](buf10, buf8, primals_11,
64, 36, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1)
del buf8
del primals_11
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7)
class RegressionModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModelNew, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=
3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
DvdNss/FaceMaskDetection
|
RegressionModel
| false
| 407
|
[
"Apache-2.0"
] | 0
|
3a4efe2bf8fb224d72636c853684a563b4e7a2e4
|
https://github.com/DvdNss/FaceMaskDetection/tree/3a4efe2bf8fb224d72636c853684a563b4e7a2e4
|
SiameseCNN
|
import torch
from torch import nn
from torch.nn import functional as F
class SiameseCNN(nn.Module):
"""
basic structure similar to the CNN
    the input is split into two 1*14*14 images that are trained separately and share the same parameters
"""
def __init__(self):
super(SiameseCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.fc1 = nn.Linear(256, 200)
self.fc2 = nn.Linear(200, 10)
self.fc3 = nn.Linear(20, 2)
def convs(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
return x
def forward(self, x1, x2):
x1 = self.convs(x1)
x1 = x1.view(-1, 256)
x1 = F.relu(self.fc1(x1))
x1 = F.relu(self.fc2(x1))
x2 = self.convs(x2)
x2 = x2.view(-1, 256)
x2 = F.relu(self.fc1(x2))
x2 = F.relu(self.fc2(x2))
x = torch.cat([x1, x2], dim=1)
x = torch.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
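# A minimal usage sketch (an illustrative assumption): run both branches on the sample
# 64x64 inputs; the view(-1, 256) regroups the batch, giving a (196, 2) sigmoid output here.
model = SiameseCNN()
out = model(torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64]))
print(out.shape)  # torch.Size([196, 2])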
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x3 = xindex // 31
x2 = xindex // 30752
x4 = xindex % 30752
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 30848 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp18, xmask)
tl.store(out_ptr2 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 39200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (10 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp15 = tl.load(in_ptr2 + (10 * x1 + (-10 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (-10 + x0), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_sigmoid_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1960
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp10 = tmp9 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (200, 256), (256, 1))
assert_size_stride(primals_7, (200,), (1,))
assert_size_stride(primals_8, (10, 200), (200, 1))
assert_size_stride(primals_9, (10,), (1,))
assert_size_stride(primals_10, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_11, (2, 20), (20, 1))
assert_size_stride(primals_12, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf11 = extern_kernels.convolution(primals_10, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
buf12 = buf11
del buf11
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(492032)](buf1, buf12, primals_2,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf1,
buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 29, 29), (53824, 841, 29, 1))
buf13 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1),
torch.int8)
buf14 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(123008)](buf12,
buf13, buf14, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 29, 29), (53824, 841, 29, 1))
buf5 = buf4
del buf4
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(215296)](buf5, buf16, primals_5,
215296, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf28 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf5, buf6, buf7, buf28, 50176, XBLOCK=512, num_warps=
4, num_stages=1)
buf8 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (196, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 200), (1, 256), 0), out=buf8)
buf17 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf18 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
buf26 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(50176)](buf16, buf17, buf18, buf26, 50176, XBLOCK=512,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((196, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (196, 256), (256, 1), 0
), reinterpret_tensor(primals_6, (256, 200), (1, 256), 0), out=
buf19)
buf9 = buf8
del buf8
buf20 = buf19
del buf19
triton_poi_fused_relu_4[grid(39200)](buf9, buf20, primals_7, 39200,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (200, 10), (1,
200), 0), out=buf10)
buf21 = empty_strided_cuda((196, 10), (10, 1), torch.float32)
extern_kernels.mm(buf20, reinterpret_tensor(primals_8, (200, 10), (
1, 200), 0), out=buf21)
buf22 = empty_strided_cuda((196, 20), (20, 1), torch.float32)
triton_poi_fused_cat_5[grid(3920)](buf10, primals_9, buf21, buf22,
3920, XBLOCK=128, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((196, 2), (2, 1), torch.float32)
extern_kernels.mm(buf22, reinterpret_tensor(primals_11, (20, 2), (1,
20), 0), out=buf23)
buf24 = buf23
del buf23
triton_poi_fused_sigmoid_6[grid(392)](buf24, primals_12, 392,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
buf25 = empty_strided_cuda((196, 10), (10, 1), torch.bool)
buf27 = empty_strided_cuda((196, 10), (10, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(1960)](buf21,
primals_9, buf10, buf25, buf27, 1960, XBLOCK=128, num_warps=4,
num_stages=1)
del buf10
del buf21
del primals_9
return (buf24, primals_1, primals_3, primals_4, primals_10, buf1, buf2,
buf3, buf5, buf6, reinterpret_tensor(buf7, (196, 256), (256, 1), 0),
buf9, buf12, buf13, buf14, buf16, buf17, reinterpret_tensor(buf18,
(196, 256), (256, 1), 0), buf20, buf22, buf24, primals_11, buf25,
primals_8, primals_6, buf26, buf27, buf28)
class SiameseCNNNew(nn.Module):
"""
basic structure similar to the CNN
    the input is split into two 1*14*14 images that are trained separately and share the same parameters
"""
def __init__(self):
super(SiameseCNNNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.fc1 = nn.Linear(256, 200)
self.fc2 = nn.Linear(200, 10)
self.fc3 = nn.Linear(20, 2)
def convs(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
return x
def forward(self, input_0, input_1):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_11 = self.fc3.weight
primals_12 = self.fc3.bias
primals_3 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
EE559DeepLearningEPFL/Project1
|
SiameseCNN
| false
| 408
|
[
"MIT"
] | 0
|
cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
https://github.com/EE559DeepLearningEPFL/Project1/tree/cbafdfee26771ae0ba3cd36375e68d92e9f108b2
|
L2Norm
|
import torch
import torch.nn as nn
class L2Norm(nn.Module):
"""
    Scale shall be learnable according to the original paper
scale: initial scale number
chan_num: L2Norm channel number (norm over all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2Norm, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, data):
return self.scale * data * data.pow(2).sum(dim=1, keepdim=True).clamp(
min=1e-12).rsqrt()
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
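# A minimal usage sketch (an illustrative assumption): each spatial position is
# L2-normalised across the 512 channels and multiplied by the learnable per-channel scale.
norm = L2Norm()
y = norm(torch.rand([4, 512, 4, 4]))
print(y.shape)  # torch.Size([4, 512, 4, 4]), same shape as the input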
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 2048 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-12
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = libdevice.rsqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 512
x3 = xindex
x0 = xindex % 16
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, None)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16),
torch.float32)
get_raw_stream(0)
triton_red_fused_pow_sum_0[grid(256)](primals_2, buf0, 256, 128,
XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_per_fused_clamp_pow_rsqrt_sum_1[grid(64)](buf2, buf0, 64, 4,
XBLOCK=64, num_warps=2, num_stages=1)
del buf0
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_mul_2[grid(32768)](primals_1, primals_2, buf2,
buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf3, primals_2, buf2
class L2NormNew(nn.Module):
"""
    Scale shall be learnable according to the original paper
scale: initial scale number
chan_num: L2Norm channel number (norm over all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2NormNew, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
EldritchJS/inference_results_v0.5
|
L2Norm
| false
| 409
|
[
"Apache-2.0"
] | 0
|
5552490e184d9fc342d871fcc410ac423ea49053
|
https://github.com/EldritchJS/inference_results_v0.5/tree/5552490e184d9fc342d871fcc410ac423ea49053
|
BasicModel3
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel3(nn.Module):
"""
Example model two from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - relu_out2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
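# A hand-worked check of the formula above (an illustrative assumption):
# with x1 = 2.0 and x2 = 0.5, ReLU(2.0 - 1) = 1.0 and ReLU(0.5) = 0.5, so f = ReLU(1.0 - 0.5) = 0.5.
m = BasicModel3()
print(m(torch.tensor([2.0]), torch.tensor([0.5])))  # tensor([0.5000])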
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp7 = tmp4 - tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BasicModel3New(nn.Module):
"""
Example model two from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Europium248/captum
|
BasicModel3
| false
| 410
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
Conv2dWithConstraint
|
import torch
from torch import nn
class Conv2dWithConstraint(nn.Conv2d):
def __init__(self, *args, max_norm=1, **kwargs):
self.max_norm = max_norm
super(Conv2dWithConstraint, self).__init__(*args, **kwargs)
def forward(self, x):
self.weight.data = torch.renorm(self.weight.data, p=2, dim=0,
maxnorm=self.max_norm)
return super(Conv2dWithConstraint, self).forward(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
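# A minimal usage sketch (an illustrative assumption): every forward pass first renorms
# the weights in place so each output filter has an L2 norm of at most max_norm.
conv = Conv2dWithConstraint(4, 4, 4, max_norm=1)
y = conv(torch.rand([4, 4, 4, 4]))
print(y.shape, conv.weight.view(4, -1).norm(dim=1))  # torch.Size([4, 4, 1, 1]), norms <= 1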
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_renorm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl
.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1.0
tmp8 = tmp6 > tmp7
tmp9 = 1e-07
tmp10 = tmp6 + tmp9
tmp11 = tl.full([1, 1], 1, tl.int32)
tmp12 = tmp11 / tmp10
tmp13 = tmp12 * tmp7
tmp14 = tl.where(tmp8, tmp13, tmp7)
tmp15 = tmp0 * tmp14
tl.store(out_ptr1 + (r1 + 64 * x0), tmp15, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_renorm_0[grid(4)](primals_1, buf1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = torch.ops.aten.set_.source_Tensor(primals_1, buf1)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
del buf2
del primals_1
return buf3, primals_3, buf1
class Conv2dWithConstraintNew(nn.Conv2d):
def __init__(self, *args, max_norm=1, **kwargs):
self.max_norm = max_norm
super(Conv2dWithConstraintNew, self).__init__(*args, **kwargs)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ErikBjare/braindecode
|
Conv2dWithConstraint
| false
| 411
|
[
"BSD-3-Clause"
] | 0
|
d77788d4813d353b65f0bfc61c6129015b0390b4
|
https://github.com/ErikBjare/braindecode/tree/d77788d4813d353b65f0bfc61c6129015b0390b4
|
EdgeCaseModel
|
import torch
import torch.nn as nn
class LayerWithRidiculouslyLongNameAndDoesntDoAnything(nn.Module):
def forward(self, x):
return x
class EdgeCaseModel(nn.Module):
def __init__(self, throw_error=False, return_str=False):
super().__init__()
self.throw_error = throw_error
self.return_str = return_str
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.model = LayerWithRidiculouslyLongNameAndDoesntDoAnything()
def forward(self, x):
x = self.conv1(x)
x = self.model('string output' if self.return_str else x)
if self.throw_error:
x = self.conv1(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
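# A minimal usage sketch (an illustrative assumption): with the default flags the model
# reduces to conv1 followed by the identity layer.
model = EdgeCaseModel()
print(model(torch.rand([4, 1, 64, 64])).shape)  # torch.Size([4, 10, 60, 60])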
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class LayerWithRidiculouslyLongNameAndDoesntDoAnything(nn.Module):
def forward(self, x):
return x
class EdgeCaseModelNew(nn.Module):
def __init__(self, throw_error=False, return_str=False):
super().__init__()
self.throw_error = throw_error
self.return_str = return_str
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.model = LayerWithRidiculouslyLongNameAndDoesntDoAnything()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
EarthLab-Luxembourg/torch-summary
|
EdgeCaseModel
| false
| 412
|
[
"MIT"
] | 0
|
8ef25aea5e9fb075df27e1e0c77bad56a7254397
|
https://github.com/EarthLab-Luxembourg/torch-summary/tree/8ef25aea5e9fb075df27e1e0c77bad56a7254397
|
BasicModel6_MultiTensor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel6_MultiTensor(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input1, input2):
input = input1 + input2
return 1 - F.relu(1 - input)[:, 1]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
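# A minimal usage sketch (an illustrative assumption): 1 - ReLU(1 - x) equals min(x, 1),
# and the final indexing keeps only channel 1 of the summed input.
m = BasicModel6_MultiTensor()
print(m(torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])).shape)  # torch.Size([4, 4, 4])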
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 - tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_rsub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BasicModel6_MultiTensorNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Europium248/captum
|
BasicModel6_MultiTensor
| false
| 413
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
Attention
|
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
class Attention(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(Attention, self).__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(encoder_out)
att2 = self.decoder_att(decoder_hidden)
att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2)
alpha = self.softmax(att)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(
dim=1)
return attention_weighted_encoding, alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
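# A minimal usage sketch (an illustrative assumption) using the documented shapes,
# here with batch_size=2 and num_pixels=9 chosen arbitrarily.
att = Attention(encoder_dim=4, decoder_dim=4, attention_dim=4)
enc, alpha = att(torch.rand([2, 9, 4]), torch.rand([2, 4]))
print(enc.shape, alpha.shape)  # torch.Size([2, 4]) and torch.Size([2, 9])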
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.nn.init import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x6 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(out_ptr0 + x4, tmp8, xmask)
tl.store(out_ptr1 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x1 = xindex // 4 % 16
x3 = xindex // 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp0 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp0 * tmp9
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + x5, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(1024)](buf0,
primals_2, buf1, primals_5, buf2, buf8, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_sum_3[grid(1024)](primals_3, buf6, buf7, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
return buf7, buf6, primals_3, reinterpret_tensor(primals_6, (64, 4), (4,
1), 0), reinterpret_tensor(buf2, (256, 4), (4, 1), 0
), buf6, primals_7, buf8
class AttentionNew(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(AttentionNew, self).__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1):
primals_1 = self.encoder_att.weight
primals_2 = self.encoder_att.bias
primals_4 = self.decoder_att.weight
primals_5 = self.decoder_att.bias
primals_7 = self.full_att.weight
primals_8 = self.full_att.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
|
EVA4-RS-Group/Phase2
|
Attention
| false
| 414
|
[
"Apache-2.0"
] | 0
|
7c551e3894979cc425dd51baeddbfa5a51b7878d
|
https://github.com/EVA4-RS-Group/Phase2/tree/7c551e3894979cc425dd51baeddbfa5a51b7878d
|
PredictionHead
|
import torch
import torch.nn as nn
class PredictionHead(nn.Module):
def __init__(self, in_channels, num_classes, num_anchors):
super(PredictionHead, self).__init__()
self.classification = nn.Conv2d(in_channels, num_classes *
num_anchors, kernel_size=1)
self.regression = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1
)
self.num_classes = num_classes
self.num_anchors = num_anchors
def forward(self, x):
bs = x.shape[0]
class_logits = self.classification(x)
box_regression = self.regression(x)
class_logits = class_logits.permute(0, 2, 3, 1).reshape(bs, -1,
self.num_classes)
box_regression = box_regression.permute(0, 2, 3, 1).reshape(bs, -1, 4)
return class_logits, box_regression
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'num_classes': 4, 'num_anchors': 4}]
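# A minimal usage sketch (an illustrative assumption): both heads are 1x1 convolutions
# whose outputs are reshaped so that rows index (spatial position, anchor) pairs.
head = PredictionHead(in_channels=4, num_classes=4, num_anchors=4)
logits, boxes = head(torch.rand([4, 4, 4, 4]))
print(logits.shape, boxes.shape)  # torch.Size([4, 64, 4]) and torch.Size([4, 64, 4])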
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_out_ptr0, in_ptr0, in_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 256 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4, 4), (256, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
buf3 = reinterpret_tensor(buf2, (4, 64, 4), (256, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(64, 16)](buf3, buf0,
primals_3, 64, 16, XBLOCK=16, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf4, (4, 64, 4), (256, 4, 1), 0)
del buf4
triton_poi_fused__unsafe_view_clone_0[grid(64, 16)](buf5, buf1,
primals_5, 64, 16, XBLOCK=16, YBLOCK=32, num_warps=4, num_stages=1)
del buf1
del primals_5
return buf3, buf5, primals_1, primals_2, primals_4
class PredictionHeadNew(nn.Module):
def __init__(self, in_channels, num_classes, num_anchors):
super(PredictionHeadNew, self).__init__()
self.classification = nn.Conv2d(in_channels, num_classes *
num_anchors, kernel_size=1)
self.regression = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1
)
self.num_classes = num_classes
self.num_anchors = num_anchors
def forward(self, input_0):
primals_2 = self.classification.weight
primals_3 = self.classification.bias
primals_4 = self.regression.weight
primals_5 = self.regression.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
EldritchJS/inference_results_v0.5
|
PredictionHead
| false
| 415
|
[
"Apache-2.0"
] | 0
|
5552490e184d9fc342d871fcc410ac423ea49053
|
https://github.com/EldritchJS/inference_results_v0.5/tree/5552490e184d9fc342d871fcc410ac423ea49053
|
BasicModel2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel2(nn.Module):
"""
Example model one from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - 1 - relu_out2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
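# A hand-worked check of the formula above (an illustrative assumption):
# with x1 = 2.0 and x2 = 0.25, ReLU(2.0) - 1 - ReLU(0.25) = 0.75, so f = ReLU(0.75) = 0.75.
m = BasicModel2()
print(m(torch.tensor([2.0]), torch.tensor([0.25])))  # tensor([0.7500])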
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 1.0
tmp4 = tmp2 - tmp3
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = tmp4 - tmp6
tmp8 = triton_helpers.maximum(tmp1, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BasicModel2New(nn.Module):
"""
Example model one from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Europium248/captum
|
BasicModel2
| false
| 416
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
BasicModel5_MultiArgs
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel5_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1) * additional_input1[0]
relu_out2 = F.relu(input2)
relu_out2 = relu_out2 * additional_input1[1]
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
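# A minimal usage sketch (an illustrative assumption): additional_input1[0] and
# additional_input1[1] scale the two ReLU branches, and additional_input2 selects a channel.
m = BasicModel5_MultiArgs()
out = m(torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]))
print(out.shape)  # torch.Size([4, 4, 4]): channel 0 of the elementwise result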
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 - tmp10
tmp12 = triton_helpers.maximum(tmp3, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_relu_sub_0[grid(256)](arg0_1, arg1_1, arg2_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0),
class BasicModel5_MultiArgsNew(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Europium248/captum
|
BasicModel5_MultiArgs
| false
| 417
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
RecursiveNet
|
import torch
import torch.nn as nn
class RecursiveNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(64, 64, 3, 1, 1)
def forward(self, x, args1=None, args2=None):
del args1, args2
for _ in range(3):
out = self.conv1(x)
out = self.conv1(out)
return out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
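# A minimal usage sketch (an illustrative assumption): x is never reassigned inside the
# loop, so every iteration recomputes conv1(conv1(x)) and the net applies two convolutions.
net = RecursiveNet()
print(net(torch.rand([4, 64, 64, 64])).shape)  # torch.Size([4, 64, 64, 64])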
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(1048576)](buf3, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, primals_3, buf1
class RecursiveNetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(64, 64, 3, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
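A possible smoke test for this entry (an addition, not part of the source record; it assumes a CUDA device and that the RecursiveNet and RecursiveNetNew classes above are in scope). Because the loop in RecursiveNet.forward reads `x` on every iteration, tracing collapses the body to conv1(conv1(x)), which is why the compiled call issues exactly two convolutions:
import torch

if torch.cuda.is_available():
    eager = RecursiveNet().cuda()
    compiled = RecursiveNetNew().cuda()
    # Share the conv1 weights so both modules compute the same function.
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 64, 64, 64, device='cuda')
    # Both paths apply conv1 twice; allow for small numerical drift.
    assert torch.allclose(eager(x), compiled(x), atol=1e-4, rtol=1e-4)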
| EarthLab-Luxembourg/torch-summary | RecursiveNet | false | 418 | ["MIT"] | 0 | 8ef25aea5e9fb075df27e1e0c77bad56a7254397 | https://github.com/EarthLab-Luxembourg/torch-summary/tree/8ef25aea5e9fb075df27e1e0c77bad56a7254397 |
|
BasicModel4_MultiArgs
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel4_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2) / x3)
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
relu_out2 = relu_out2.div(additional_input1)
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp4 - tmp8
tmp10 = triton_helpers.maximum(tmp3, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_relu_sub_0[grid(256)](arg0_1, arg1_1, arg2_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0),
class BasicModel4_MultiArgsNew(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2) / x3)
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
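A minimal equivalence sketch (an addition, not part of the source record; assumes a CUDA device and the two classes above in scope). Both modules are parameter-free, and the compiled call returns the additional_input2=0 slice, so the outputs should match elementwise:
import torch

if torch.cuda.is_available():
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    x2 = torch.rand(4, 4, 4, 4, device='cuda')
    x3 = torch.rand(4, 4, 4, 4, device='cuda')
    ref = BasicModel4_MultiArgs()(x1, x2, x3)     # eager: relu(relu(x1 - 1) - relu(x2) / x3)[:, 0]
    out = BasicModel4_MultiArgsNew()(x1, x2, x3)  # fused div/relu/sub Triton kernel
    assert ref.shape == out.shape == (4, 4, 4)
    assert torch.allclose(ref, out)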
| Europium248/captum | BasicModel4_MultiArgs | false | 419 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
|
LanguageModelCriterion
|
import torch
import torch.nn as nn
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
input = input.contiguous().view(-1, input.size(2))
target = target.contiguous().view(-1, 1)
mask = mask.contiguous().view(-1, 1)
output = -input.gather(1, target) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([256, 4, 4], dtype=torch.int64), torch.ones([256],
dtype=torch.int64), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = tl.broadcast_to(tmp9, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (256, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (256,), (1,))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_gather_mul_neg_sum_0[grid(1)](buf2, arg1_1,
arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LanguageModelCriterionNew(nn.Module):
def __init__(self):
super(LanguageModelCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| FYJNEVERFOLLOWS/hw_NLP | LanguageModelCriterion | false | 420 | ["MIT"] | 0 | c9d72804128dfed3a53e9df40e94b2d53cccacae | https://github.com/FYJNEVERFOLLOWS/hw_NLP/tree/c9d72804128dfed3a53e9df40e94b2d53cccacae |
|
BasicModel
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = 1 - F.relu(1 - input)
return input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class BasicModelNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
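A small observation plus check (added here, not from the source record): 1 - relu(1 - x) is mathematically an upper clamp at 1, so for inputs drawn from [0, 1) the module acts as the identity up to rounding. Assuming a CUDA device and the two classes above in scope:
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = BasicModelNew()(x)
    assert torch.allclose(out, BasicModel()(x))           # compiled vs. eager
    assert torch.allclose(out, torch.clamp(x, max=1.0))   # 1 - relu(1 - x) behaves as min(x, 1)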
| Europium248/captum | BasicModel | false | 421 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
|
ClassificationModel
|
import torch
import torch.nn as nn
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = self.output_act(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.
num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_clone_convolution_5(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 46080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 720
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (720, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (720,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_4, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_6, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_8, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((720, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(184320, 9)](primals_10, buf5, 184320, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(16384)](buf7, primals_2,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf8 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_5,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(16384)](buf11, primals_7,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(16384)](buf13, primals_9,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 720, 4, 4), (11520, 1, 2880, 720))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((4, 4, 4, 9, 80), (11520, 2880, 720, 80,
1), torch.float32)
triton_poi_fused_clone_convolution_5[grid(46080)](buf15, primals_11,
buf16, 46080, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
return reinterpret_tensor(buf16, (4, 144, 80), (11520, 80, 1), 0
), buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15
class ClassificationModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256):
super(ClassificationModelNew, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
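An illustrative check (an addition, not part of the record; assumes a CUDA device and the two classes above in scope): with num_features_in=4 and the defaults num_anchors=9, num_classes=80, a 4x4 feature map yields 4*4*9 = 144 anchor predictions per image, so the output is (4, 144, 80). Copying the state dict lets the eager and compiled modules be compared on identical weights:
import torch

if torch.cuda.is_available():
    eager = ClassificationModel(num_features_in=4).cuda()
    compiled = ClassificationModelNew(num_features_in=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref, out = eager(x), compiled(x)
    assert out.shape == (4, 144, 80)
    # Sigmoid outputs are bounded, so a small absolute tolerance suffices.
    assert torch.allclose(ref, out, atol=1e-4)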
| DvdNss/FaceMaskDetection | ClassificationModel | false | 422 | ["Apache-2.0"] | 0 | 3a4efe2bf8fb224d72636c853684a563b4e7a2e4 | https://github.com/DvdNss/FaceMaskDetection/tree/3a4efe2bf8fb224d72636c853684a563b4e7a2e4 |
|
ReLUDeepLiftModel
|
import torch
import torch.nn as nn
class ReLUDeepLiftModel(nn.Module):
"""
https://www.youtube.com/watch?v=f_iAM0NPwnM
"""
def __init__(self):
super().__init__()
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x1, x2, x3=2):
return 2 * self.relu1(x1) + x3 * self.relu2(x2 - 1.5)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = 1.5
tmp7 = tmp5 - tmp6
tmp8 = triton_helpers.maximum(tmp1, tmp7)
tmp9 = tmp8 * tmp3
tmp10 = tmp4 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ReLUDeepLiftModelNew(nn.Module):
"""
https://www.youtube.com/watch?v=f_iAM0NPwnM
"""
def __init__(self):
super().__init__()
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
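A minimal sketch (added, not from the source record; assumes a CUDA device and the two classes above in scope): the modules have no parameters, so the fused add/mul/relu/sub kernel can be checked directly against the eager expression 2*relu(x1) + 2*relu(x2 - 1.5):
import torch

if torch.cuda.is_available():
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    x2 = torch.rand(4, 4, 4, 4, device='cuda')
    ref = ReLUDeepLiftModel()(x1, x2)     # uses the default x3=2 multiplier
    out = ReLUDeepLiftModelNew()(x1, x2)  # single fused Triton kernel
    assert torch.allclose(ref, out)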
| Europium248/captum | ReLUDeepLiftModel | false | 423 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
|
eSEModule
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModule(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc(x)
x = self.hsigmoid(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_1[grid(256)](
primals_1, buf2, primals_3, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_2[grid(16)](buf2,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del primals_3
return buf3, primals_1, primals_2, buf1, buf4
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModuleNew(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
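A sketch of an equivalence check (an addition, not part of the record; assumes a CUDA device and the classes above in scope): after copying the 1x1 conv parameters, the eager eSEModule and the compiled eSEModuleNew should agree, with only tiny drift from the mean reduction and the multiply-by-1/6 in the fused hard-sigmoid:
import torch

if torch.cuda.is_available():
    eager = eSEModule(channel=4).cuda()
    compiled = eSEModuleNew(channel=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)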
| EllisHui/outOfRailWay | eSEModule | false | 424 | ["BSD-2-Clause"] | 0 | e3bf9aaa18879bee5536740d55006c872f06278f | https://github.com/EllisHui/outOfRailWay/tree/e3bf9aaa18879bee5536740d55006c872f06278f |
|
TemporalEmbedding
|
import math
import torch
import torch.nn as nn
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
w.require_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class TemporalEmbedding(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbedding, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
return hour_x + weekday_x + day_x + month_x + minute_x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 24, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 24) | ~xmask,
'index out of bounds: 0 <= tmp5 < 24')
tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp5), xmask)
tmp9 = tmp8.to(tl.int64)
tmp10 = tl.full([XBLOCK], 7, tl.int32)
tmp11 = tmp9 + tmp10
tmp12 = tmp9 < 0
tmp13 = tl.where(tmp12, tmp11, tmp9)
tl.device_assert((0 <= tmp13) & (tmp13 < 7) | ~xmask,
'index out of bounds: 0 <= tmp13 < 7')
tmp15 = tl.load(in_ptr2 + (x0 + 4 * tmp13), xmask)
tmp16 = tmp7 + tmp15
tmp18 = tmp17.to(tl.int64)
tmp19 = tl.full([XBLOCK], 32, tl.int32)
tmp20 = tmp18 + tmp19
tmp21 = tmp18 < 0
tmp22 = tl.where(tmp21, tmp20, tmp18)
tl.device_assert((0 <= tmp22) & (tmp22 < 32) | ~xmask,
'index out of bounds: 0 <= tmp22 < 32')
tmp24 = tl.load(in_ptr3 + (x0 + 4 * tmp22), xmask)
tmp25 = tmp16 + tmp24
tmp27 = tmp26.to(tl.int64)
tmp28 = tl.full([XBLOCK], 13, tl.int32)
tmp29 = tmp27 + tmp28
tmp30 = tmp27 < 0
tmp31 = tl.where(tmp30, tmp29, tmp27)
tl.device_assert((0 <= tmp31) & (tmp31 < 13) | ~xmask,
'index out of bounds: 0 <= tmp31 < 13')
tmp33 = tl.load(in_ptr4 + (x0 + 4 * tmp31), xmask)
tmp34 = tmp25 + tmp33
tmp35 = 0.0
tmp36 = tmp34 + tmp35
tl.store(out_ptr0 + x4, tmp36, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (24, 4), (4, 1))
assert_size_stride(arg2_1, (7, 4), (4, 1))
assert_size_stride(arg3_1, (32, 4), (4, 1))
assert_size_stride(arg4_1, (13, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_embedding_0[grid(256)](arg0_1, arg1_1, arg2_1,
arg3_1, arg4_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return buf0,
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
w.require_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class TemporalEmbeddingNew(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbeddingNew, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, input_0):
arg1_1 = self.hour_embed.emb.weight
arg2_1 = self.weekday_embed.emb.weight
arg3_1 = self.day_embed.emb.weight
arg4_1 = self.month_embed.emb.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
| Fanxingye/Informer2020 | TemporalEmbedding | false | 425 | ["Apache-2.0"] | 0 | 94fd05f82ff0882681a9716ae3e980a574fdcbed | https://github.com/Fanxingye/Informer2020/tree/94fd05f82ff0882681a9716ae3e980a574fdcbed |
|
TanhDeepLiftModel
|
import torch
import torch.nn as nn
class TanhDeepLiftModel(nn.Module):
"""
Same as the ReLUDeepLiftModel, but with activations
that can have negative outputs
"""
def __init__(self):
super().__init__()
self.tanh1 = nn.Tanh()
self.tanh2 = nn.Tanh()
def forward(self, x1, x2):
return 2 * self.tanh1(x1) + 2 * self.tanh2(x2 - 1.5)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sub_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp5 = 1.5
tmp6 = tmp4 - tmp5
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp7 * tmp2
tmp9 = tmp3 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sub_tanh_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class TanhDeepLiftModelNew(nn.Module):
"""
Same as the ReLUDeepLiftModel, but with activations
that can have negative outputs
"""
def __init__(self):
super().__init__()
self.tanh1 = nn.Tanh()
self.tanh2 = nn.Tanh()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Europium248/captum | TanhDeepLiftModel | false | 426 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
|
NaiveGroupNorm
|
from torch.nn import Module
import torch
from torch.nn import Parameter
from torch.nn import init
import torch.nn.parallel
class NaiveGroupNorm(Module):
"""NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch.
It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX.
The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = NaiveGroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = NaiveGroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = NaiveGroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine',
'weight', 'bias']
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
super(NaiveGroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_channels))
self.bias = Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
N, C, H, W = input.size()
assert C % self.num_groups == 0
input = input.reshape(N, self.num_groups, -1)
mean = input.mean(dim=-1, keepdim=True)
var = (input ** 2).mean(dim=-1, keepdim=True) - mean ** 2
std = torch.sqrt(var + self.eps)
input = (input - mean) / std
input = input.reshape(N, C, H, W)
if self.affine:
input = input * self.weight.reshape(1, C, 1, 1
) + self.bias.reshape(1, C, 1, 1)
return input
def extra_repr(self):
return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'.
format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_groups': 1, 'num_channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn import Parameter
from torch.nn import init
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp0 - tmp11
tmp19 = tmp18 / tmp17
tmp21 = tmp19 * tmp20
tmp23 = tmp21 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp17, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3,
primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf4, primals_1, buf1, buf3
class NaiveGroupNormNew(Module):
"""NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch.
It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX.
The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = NaiveGroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = NaiveGroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = NaiveGroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine',
'weight', 'bias']
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
super(NaiveGroupNormNew, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_channels))
self.bias = Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'.
format(**self.__dict__))
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
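A minimal check (added, not from the source record; assumes a CUDA device and the classes above in scope): reset_parameters initialises weight=1 and bias=0 deterministically, so no state copying is needed, and with num_groups=1, num_channels=4 the fused kernel should also track torch.nn.GroupNorm up to the E[x^2] - mean^2 variance formulation:
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = NaiveGroupNorm(num_groups=1, num_channels=4).cuda()(x)
    out = NaiveGroupNormNew(num_groups=1, num_channels=4).cuda()(x)
    assert torch.allclose(ref, out, atol=1e-5)
    gn = torch.nn.GroupNorm(1, 4).cuda()   # same eps=1e-5 and affine defaults
    assert torch.allclose(gn(x), out, atol=1e-4)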
| EllisHui/outOfRailWay | NaiveGroupNorm | false | 427 | ["BSD-2-Clause"] | 0 | e3bf9aaa18879bee5536740d55006c872f06278f | https://github.com/EllisHui/outOfRailWay/tree/e3bf9aaa18879bee5536740d55006c872f06278f |
|
GEGLU
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.gelu(gate) * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GEGLUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
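A minimal check (an addition, not part of the record; assumes a CUDA device and the two classes above in scope): the fused kernel reads the gate from the second half of the last dimension, mirroring x.chunk(2, dim=-1), and applies the exact erf-based GELU, so it should agree with the eager F.gelu(gate) * x:
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = GEGLU()(x)
    out = GEGLUNew()(x)
    assert ref.shape == out.shape == (4, 4, 4, 2)   # last dim halved by the chunk
    assert torch.allclose(ref, out, atol=1e-6)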
| ClashLuke/gpt-neox | GEGLU | false | 428 | ["MIT"] | 0 | 3291d0e6c867d9d328b96e8377f5b77c6f66c323 | https://github.com/ClashLuke/gpt-neox/tree/3291d0e6c867d9d328b96e8377f5b77c6f66c323 |
|
MultiRelu
|
import torch
from torch import Tensor
from typing import Tuple
import torch.nn as nn
from typing import no_type_check
class MultiRelu(nn.Module):
def __init__(self, inplace: 'bool'=False):
super().__init__()
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
@no_type_check
def forward(self, arg1: 'Tensor', arg2: 'Tensor') ->Tuple[Tensor, Tensor]:
return self.relu1(arg1), self.relu2(arg2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_relu_0[grid(256)](arg1_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg1_1
return buf0, buf1
class MultiReluNew(nn.Module):
def __init__(self, inplace: 'bool'=False):
super().__init__()
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| Europium248/captum | MultiRelu | false | 429 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
|
BasicModel_MaxPool_ReLU
|
import torch
import torch.nn as nn
class BasicModel_MaxPool_ReLU(nn.Module):
def __init__(self, inplace=False):
super().__init__()
self.maxpool = nn.MaxPool1d(3)
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return self.relu(self.maxpool(x)).sum(dim=1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class BasicModel_MaxPool_ReLUNew(nn.Module):
def __init__(self, inplace=False):
super().__init__()
self.maxpool = nn.MaxPool1d(3)
self.relu = nn.ReLU(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
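A small sketch (added, not from the source record; assumes a CUDA device, the two classes above in scope, and a PyTorch version that accepts the unbatched (C, L) input to MaxPool1d): on the (4, 4) input, MaxPool1d(3) keeps only the maximum of the first three columns, so the fused kernel reduces each row to relu(max(x[:, :3])) before the size-1 sum:
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, device='cuda')
    ref = BasicModel_MaxPool_ReLU()(x)
    out = BasicModel_MaxPool_ReLUNew()(x)
    assert torch.allclose(ref, out)
    assert torch.allclose(out, torch.relu(x[:, :3].max(dim=1).values))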
| Europium248/captum | BasicModel_MaxPool_ReLU | false | 430 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
|
AdaptiveCatAvgMaxPool2d
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
def adaptive_catavgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
class AdaptiveCatAvgMaxPool2d(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveCatAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_catavgmax_pool2d(x, self.output_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0)
triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf3,
def adaptive_catavgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
class AdaptiveCatAvgMaxPool2dNew(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveCatAvgMaxPool2dNew, self).__init__()
self.output_size = output_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
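A minimal sketch (an addition, not part of the record; assumes a CUDA device and the classes above in scope): the compiled call writes the average pool into channels 0-3 and the max pool into channels 4-7 of a single (4, 8, 1, 1) buffer, matching torch.cat((x_avg, x_max), 1) in the eager helper:
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = AdaptiveCatAvgMaxPool2d()(x)
    out = AdaptiveCatAvgMaxPool2dNew()(x)
    assert out.shape == (4, 8, 1, 1)
    assert torch.allclose(ref, out, atol=1e-6)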
| Fanzhongjie/ARFE | AdaptiveCatAvgMaxPool2d | false | 431 | ["Apache-2.0"] | 0 | 4b96b8c5bc0895d3d30acec2a490f81a860fe860 | https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860 |
|
GCN_Linear
|
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class sparse_dropout(Module):
"""
Sparse dropout implementation
"""
def __init__(self):
super(sparse_dropout, self).__init__()
def forward(self, input, p=0.5):
if self.training is True and p > 0.0:
random = torch.rand_like(input._values())
mask = random.ge(p)
new_indices = torch.masked_select(input._indices(), mask).reshape(
2, -1)
new_values = torch.masked_select(input._values(), mask)
output = torch.sparse.FloatTensor(new_indices, new_values,
input.shape)
return output
else:
return input
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, node_dropout=0.0,
edge_dropout=0.0, bias=True):
super(GraphConvolution, self).__init__()
self.sparse_dropout = sparse_dropout()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.zeros(in_features, out_features))
stdv = 1.0 / math.sqrt(self.weight.size(1))
nn.init.uniform_(self.weight, -stdv, stdv)
if bias:
self.bias = Parameter(torch.zeros(1, out_features))
nn.init.uniform_(self.bias, -stdv, stdv)
else:
self.register_parameter('bias', None)
self.node_dropout = node_dropout
self.edge_dropout = edge_dropout
def forward(self, input, adj):
adj = self.sparse_dropout(adj, self.edge_dropout)
support = torch.mm(input, self.weight)
support = F.dropout(support, self.node_dropout, training=self.training)
output = torch.sparse.mm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN_Linear(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN_Linear, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.linear2 = nn.Linear(nhid, nclass, bias=True)
self.dropout = dropout
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
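# Dense sketch of one GraphConvolution forward pass (illustrative only, added
# here for clarity; names and shapes follow the 4-node fixture above). The
# torch.sparse.mm(adj, support) step is what the compiled call() below lowers
# to torch.ops.aten._sparse_addmm.
def gcn_layer_dense(x, adj, weight, bias):
    support = x @ weight      # (N, nfeat) @ (nfeat, nhid) -> (N, nhid)
    out = adj @ support       # neighbourhood aggregation over the graph
    return out + bias         # bias has shape (1, nhid) and broadcasts over rows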
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import math
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
(4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
del buf0
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4), (1, 4), 0)
del buf3
triton_poi_fused_add_relu_1[grid(16)](buf4, primals_4, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_4
buf5 = buf1
del buf1
extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_6
return buf5, buf4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class sparse_dropout(Module):
"""
Sparse dropout implementation
"""
def __init__(self):
super(sparse_dropout, self).__init__()
def forward(self, input, p=0.5):
if self.training is True and p > 0.0:
random = torch.rand_like(input._values())
mask = random.ge(p)
new_indices = torch.masked_select(input._indices(), mask).reshape(
2, -1)
new_values = torch.masked_select(input._values(), mask)
output = torch.sparse.FloatTensor(new_indices, new_values,
input.shape)
return output
else:
return input
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, node_dropout=0.0,
edge_dropout=0.0, bias=True):
super(GraphConvolution, self).__init__()
self.sparse_dropout = sparse_dropout()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.zeros(in_features, out_features))
stdv = 1.0 / math.sqrt(self.weight.size(1))
nn.init.uniform_(self.weight, -stdv, stdv)
if bias:
self.bias = Parameter(torch.zeros(1, out_features))
nn.init.uniform_(self.bias, -stdv, stdv)
else:
self.register_parameter('bias', None)
self.node_dropout = node_dropout
self.edge_dropout = edge_dropout
def forward(self, input, adj):
adj = self.sparse_dropout(adj, self.edge_dropout)
support = torch.mm(input, self.weight)
support = F.dropout(support, self.node_dropout, training=self.training)
output = torch.sparse.mm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN_LinearNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN_LinearNew, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.linear2 = nn.Linear(nhid, nclass, bias=True)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_2 = self.linear2.weight
primals_6 = self.linear2.bias
primals_3 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Eudialyte/SepGAT | GCN_Linear | false | 432 | ["MIT"] | 0 | 6ea77714d1b2f2f5d0857cddcc9f1f5f9c0bcf50 | https://github.com/Eudialyte/SepGAT/tree/6ea77714d1b2f2f5d0857cddcc9f1f5f9c0bcf50 |
GCN
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
class Conv2D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=
'same', stride=1, dilation=1, groups=1):
super(Conv2D, self).__init__()
assert type(kernel_size) in [int, tuple
], 'Allowed kernel type [int or tuple], not {}'.format(type(
kernel_size))
assert padding == 'same', 'Allowed padding type {}, not {}'.format(
'same', padding)
self.kernel_size = kernel_size
if isinstance(kernel_size, tuple):
self.h_kernel = kernel_size[0]
self.w_kernel = kernel_size[1]
else:
self.h_kernel = kernel_size
self.w_kernel = kernel_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=self.stride,
dilation=self.dilation, groups=self.groups)
def forward(self, x):
if self.padding == 'same':
height, width = x.shape[2:]
h_pad_need = max(0, (height - 1) * self.stride + self.h_kernel -
height)
w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel -
width)
pad_left = w_pad_need // 2
pad_right = w_pad_need - pad_left
pad_top = h_pad_need // 2
pad_bottom = h_pad_need - pad_top
padding = pad_left, pad_right, pad_top, pad_bottom
x = F.pad(x, padding, 'constant', 0)
x = self.conv(x)
return x
class GCN(nn.Module):
"""
Large Kernel Matters -- https://arxiv.org/abs/1703.02719
"""
def __init__(self, in_channels, out_channels, k=3):
super(GCN, self).__init__()
self.conv_l1 = Conv2D(in_channels=in_channels, out_channels=
out_channels, kernel_size=(k, 1), padding='same')
self.conv_l2 = Conv2D(in_channels=out_channels, out_channels=
out_channels, kernel_size=(1, k), padding='same')
self.conv_r1 = Conv2D(in_channels=in_channels, out_channels=
out_channels, kernel_size=(1, k), padding='same')
self.conv_r2 = Conv2D(in_channels=out_channels, out_channels=
out_channels, kernel_size=(k, 1), padding='same')
def forward(self, x):
x1 = self.conv_l1(x)
x1 = self.conv_l2(x1)
x2 = self.conv_r1(x)
x2 = self.conv_r2(x2)
out = x1 + x2
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
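# Worked 'same'-padding arithmetic for the (k, 1) branch on the 4x4 fixture
# (this only re-evaluates the formulas from Conv2D.forward above):
#   h_pad_need = max(0, (4 - 1) * 1 + 3 - 4) = 2, split as pad_top = 1, pad_bottom = 1,
# so the padded height is 4 + 2 = 6 -- which is why the compiled call() below
# allocates intermediate buffers of shape (4, 4, 6, 4) and (4, 4, 4, 6).
def _same_pad_need(size, stride, kernel):
    return max(0, (size - 1) * stride + kernel - size)
assert _same_pad_need(4, 1, 3) == 2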
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 6
x2 = xindex // 24
x3 = xindex % 24
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x4 = xindex // 6
x2 = xindex // 24 % 4
x5 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + x5, tmp10, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 6
x4 = xindex // 24
x5 = xindex % 24
x2 = xindex // 24 % 4
x6 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + x6, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(384)](primals_1, buf0, 384,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_1[grid(384)](buf1,
primals_3, buf2, 384, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_2[grid(384)](primals_1, buf4, 384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_3[grid(384)](buf5,
primals_7, buf6, 384, XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del primals_7
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf3
del buf3
triton_poi_fused_add_convolution_4[grid(256)](buf8, primals_5, buf7,
primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del primals_5
del primals_9
return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2,
buf4, buf6)
class Conv2D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=
'same', stride=1, dilation=1, groups=1):
super(Conv2D, self).__init__()
assert type(kernel_size) in [int, tuple
], 'Allowed kernel type [int or tuple], not {}'.format(type(
kernel_size))
assert padding == 'same', 'Allowed padding type {}, not {}'.format(
'same', padding)
self.kernel_size = kernel_size
if isinstance(kernel_size, tuple):
self.h_kernel = kernel_size[0]
self.w_kernel = kernel_size[1]
else:
self.h_kernel = kernel_size
self.w_kernel = kernel_size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=self.stride,
dilation=self.dilation, groups=self.groups)
def forward(self, x):
if self.padding == 'same':
height, width = x.shape[2:]
h_pad_need = max(0, (height - 1) * self.stride + self.h_kernel -
height)
w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel -
width)
pad_left = w_pad_need // 2
pad_right = w_pad_need - pad_left
pad_top = h_pad_need // 2
pad_bottom = h_pad_need - pad_top
padding = pad_left, pad_right, pad_top, pad_bottom
x = F.pad(x, padding, 'constant', 0)
x = self.conv(x)
return x
class GCNNew(nn.Module):
"""
Large Kernel Matters -- https://arxiv.org/abs/1703.02719
"""
def __init__(self, in_channels, out_channels, k=3):
super(GCNNew, self).__init__()
self.conv_l1 = Conv2D(in_channels=in_channels, out_channels=
out_channels, kernel_size=(k, 1), padding='same')
self.conv_l2 = Conv2D(in_channels=out_channels, out_channels=
out_channels, kernel_size=(1, k), padding='same')
self.conv_r1 = Conv2D(in_channels=in_channels, out_channels=
out_channels, kernel_size=(1, k), padding='same')
self.conv_r2 = Conv2D(in_channels=out_channels, out_channels=
out_channels, kernel_size=(k, 1), padding='same')
def forward(self, input_0):
primals_2 = self.conv_l1.conv.weight
primals_3 = self.conv_l1.conv.bias
primals_4 = self.conv_l2.conv.weight
primals_5 = self.conv_l2.conv.bias
primals_6 = self.conv_r1.conv.weight
primals_7 = self.conv_r1.conv.bias
primals_8 = self.conv_r2.conv.weight
primals_9 = self.conv_r2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| EllisHui/outOfRailWay | GCN | false | 433 | ["BSD-2-Clause"] | 0 | e3bf9aaa18879bee5536740d55006c872f06278f | https://github.com/EllisHui/outOfRailWay/tree/e3bf9aaa18879bee5536740d55006c872f06278f |
Linear2
|
import torch
import torch.nn.functional as F
from torch import nn
class Linear2(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(Linear2, self).__init__()
self.linear1 = nn.Linear(nfeat, nhid, bias=True)
self.linear2 = nn.Linear(nhid, nclass, bias=True)
self.dropout = dropout
def forward(self, x, adj=None):
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.linear1(x))
x = F.dropout(x, self.dropout, training=self.training)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class Linear2New(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(Linear2New, self).__init__()
self.linear1 = nn.Linear(nfeat, nhid, bias=True)
self.linear2 = nn.Linear(nhid, nclass, bias=True)
self.dropout = dropout
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Eudialyte/SepGAT | Linear2 | false | 434 | ["MIT"] | 0 | 6ea77714d1b2f2f5d0857cddcc9f1f5f9c0bcf50 | https://github.com/Eudialyte/SepGAT/tree/6ea77714d1b2f2f5d0857cddcc9f1f5f9c0bcf50 |
BalancedL1Loss
|
import functools
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None, **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(pred, target,
weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta,
reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss_bbox
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
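# The fused Triton kernel below hardcodes the constants implied by the default
# balanced_l1_loss parameters (alpha=0.5, gamma=1.5, beta=1.0); a quick sketch
# reproducing them (np is the numpy imported at the top of this snippet):
def _balanced_l1_constants(alpha=0.5, gamma=1.5, beta=1.0):
    b = np.e ** (gamma / alpha) - 1            # 19.085536923187664 (tmp6 in the kernel)
    log_scale = alpha / b                      # 0.02619784824562798 (tmp9, scale of the log branch)
    linear_offset = gamma / b - alpha * beta   # 0.07859354473688394 - 0.5 (tmp20 - tmp15)
    return b, log_scale, linear_offset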
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 19.085536923187664
tmp7 = tmp3 * tmp6
tmp8 = tmp7 + tmp4
tmp9 = 0.02619784824562798
tmp10 = tmp8 * tmp9
tmp11 = tmp7 * tmp4
tmp12 = tmp11 + tmp4
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = 0.5
tmp16 = tmp3 * tmp15
tmp17 = tmp14 - tmp16
tmp18 = 1.5
tmp19 = tmp3 * tmp18
tmp20 = 0.07859354473688394
tmp21 = tmp19 + tmp20
tmp22 = tmp21 - tmp15
tmp23 = tl.where(tmp5, tmp17, tmp22)
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
class BalancedL1LossNew(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super(BalancedL1LossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Fanzhongjie/ARFE | BalancedL1Loss | false | 435 | ["Apache-2.0"] | 0 | 4b96b8c5bc0895d3d30acec2a490f81a860fe860 | https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860 |
LinearMaxPoolLinearModel
|
import torch
import torch.nn as nn
class LinearMaxPoolLinearModel(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(4, 4, bias=False)
self.lin1.weight = nn.Parameter(torch.eye(4, 4))
self.pool1 = nn.MaxPool1d(4)
self.lin2 = nn.Linear(1, 1, bias=False)
self.lin2.weight = nn.Parameter(torch.ones(1, 1))
def forward(self, x):
x = x.unsqueeze(1)
return self.lin2(self.pool1(self.lin1(x))[:, 0, :])
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
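# Because lin1 is fixed to the identity and lin2 to a single weight of 1, the
# whole model reduces to a per-row max. A small illustrative check (not part
# of the original fixtures) against the eager module above:
def _check_rowwise_max():
    model = LinearMaxPoolLinearModel()
    x = torch.rand(4, 4)
    return torch.allclose(model(x).squeeze(1), x.max(dim=1).values)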
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x0, tmp15, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.int8)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(4)](buf0, buf1,
buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (4, 1), (1, 0), 0),
primals_3, out=buf3)
return buf3, primals_1, reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4,
1), 0), buf1, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), primals_3
class LinearMaxPoolLinearModelNew(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(4, 4, bias=False)
self.lin1.weight = nn.Parameter(torch.eye(4, 4))
self.pool1 = nn.MaxPool1d(4)
self.lin2 = nn.Linear(1, 1, bias=False)
self.lin2.weight = nn.Parameter(torch.ones(1, 1))
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_3 = self.lin2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Europium248/captum | LinearMaxPoolLinearModel | false | 436 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
AdaptiveAvgMaxPool2d
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
def adaptive_avgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
class AdaptiveAvgMaxPool2d(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_avgmax_pool2d(x, self.output_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_mul_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp21 = triton_helpers.maximum(tmp20, tmp19)
tmp23 = triton_helpers.maximum(tmp22, tmp21)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp35 = triton_helpers.maximum(tmp34, tmp33)
tmp36 = 16.0
tmp37 = tmp4 / tmp36
tmp38 = tmp37 + tmp35
tmp39 = 0.5
tmp40 = tmp38 * tmp39
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp40, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_adaptive_max_pool2d_add_mean_mul_0[grid(16)](buf2,
arg0_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
return buf2,
def adaptive_avgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
class AdaptiveAvgMaxPool2dNew(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveAvgMaxPool2dNew, self).__init__()
self.output_size = output_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Fanzhongjie/ARFE | AdaptiveAvgMaxPool2d | false | 437 | ["Apache-2.0"] | 0 | 4b96b8c5bc0895d3d30acec2a490f81a860fe860 | https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860 |
TinyCnn
|
import torch
import torch.nn as nn
class TinyCnn(nn.Module):
def __init__(self, feature_extraction=False):
super().__init__()
self.feature_extraction = feature_extraction
self.conv1 = nn.Conv2d(3, 3, 5)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, 2)
if not self.feature_extraction:
self.conv2 = nn.Conv2d(3, 10, 2)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
if not self.feature_extraction:
x = self.conv2(x)
x = x.view(-1, 10)
else:
x = x.view(-1, 12)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 3
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 10800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x4 = xindex // 900
x3 = xindex // 2700
x5 = xindex % 2700
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 2720 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 2816 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 33640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (10, 3, 2, 2), (12, 4, 2, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 60, 60), (10800, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 3, 60, 60), (10848, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(43200)](buf0, primals_2,
buf1, 43200, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 3, 30, 30), (2720, 900, 30, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 3, 30, 30), (2816, 900, 30, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(10800)](buf1, buf2,
buf3, 10800, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 29, 29), (8410, 841, 29, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(33640)](buf5, primals_5, 33640,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
return reinterpret_tensor(buf5, (3364, 10), (10, 1), 0
), primals_1, primals_3, primals_4, buf1, buf2, buf3
class TinyCnnNew(nn.Module):
def __init__(self, feature_extraction=False):
super().__init__()
self.feature_extraction = feature_extraction
self.conv1 = nn.Conv2d(3, 3, 5)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, 2)
if not self.feature_extraction:
self.conv2 = nn.Conv2d(3, 10, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Europium248/captum | TinyCnn | false | 438 | ["BSD-3-Clause"] | 0 | ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc | https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc |
ConvNorm
|
import torch
import torch.utils.data
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.
calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
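# For odd kernel sizes the default padding = dilation * (k - 1) // 2 keeps the
# sequence length unchanged: the effective receptive field is dilation*(k-1)+1,
# so L_out = L + 2*padding - (dilation*(k-1)+1) + 1 = L. A small sketch
# (the fixture above uses kernel_size=1, where the padding is simply 0):
def _check_convnorm_same_length():
    conv = ConvNorm(4, 4, kernel_size=5, dilation=2)   # padding = 2 * 4 // 2 = 4
    x = torch.rand(2, 4, 17)
    return conv(x).shape == (2, 4, 17)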
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4), (4, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0)
class ConvNormNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNormNew, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.
calculate_gain(w_init_gain))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| FadyKhalaf/tacotron2 | ConvNorm | false | 439 | ["BSD-3-Clause"] | 0 | d9bf28a6d286aab42bce46df9f26a9a3d7c2f01f | https://github.com/FadyKhalaf/tacotron2/tree/d9bf28a6d286aab42bce46df9f26a9a3d7c2f01f |
TokenEmbedding
|
import torch
import torch.nn as nn
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'd_model': 4}]
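# padding_mode='circular' with padding=1 wraps one element from each end of the
# length-4 sequence before the width-3 convolution; triton_poi_fused_copy_0 in
# the compiled code below materializes that wrap-around as the (4, 4, 6) buffer.
# Equivalent eager padding, as a small sketch:
def _circular_pad_example():
    x = torch.arange(4.0).view(1, 1, 4)                          # [[[0., 1., 2., 3.]]]
    return torch.nn.functional.pad(x, (1, 1), mode='circular')   # [[[3., 0., 1., 2., 3., 0.]]]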
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 24
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 6
x2 = xindex
y1 = yindex // 6
tmp0 = y0
tmp1 = tl.full([1, 1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK])
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp4
tmp9 = tmp7 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp6
tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp13 = float('nan')
tmp14 = tl.where(tmp10, tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tmp3 >= tmp4
tmp18 = tmp3 < tmp1
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp2
tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp19, tmp21, tmp13)
tmp23 = tl.where(tmp5, tmp16, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.where(tmp30, tmp32, tmp13)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp26, tmp33, tmp34)
tmp36 = tmp0 >= tmp4
tmp37 = tmp0 < tmp1
tmp38 = tmp36 & tmp37
tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp38, tmp39, tmp13)
tmp41 = tl.where(tmp26, tmp35, tmp40)
tmp42 = tl.where(tmp2, tmp25, tmp41)
tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK
=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(64)](buf3, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1
class TokenEmbeddingNew(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbeddingNew, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, input_0):
primals_2 = self.tokenConv.weight
primals_3 = self.tokenConv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Fanxingye/Informer2020 | TokenEmbedding | false | 440 | ["Apache-2.0"] | 0 | 94fd05f82ff0882681a9716ae3e980a574fdcbed | https://github.com/Fanxingye/Informer2020/tree/94fd05f82ff0882681a9716ae3e980a574fdcbed |
BasicModel_ConvNet_MaxPool3d
|
import torch
import torch.nn as nn
class BasicModel_ConvNet_MaxPool3d(nn.Module):
"""Same as above, but with the MaxPool1d replaced
with a MaxPool3d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv3d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool3d(2)
self.conv2 = nn.Conv3d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool3d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
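# Spatial-shape bookkeeping for the 64^3 fixture (this is where the 10976 rows
# in the compiled call() below come from):
def _flattened_rows():
    d = 64
    d = d - 2     # conv1: kernel 3, no padding -> 62
    d = d // 2    # pool1: MaxPool3d(2)         -> 31
    d = d - 2     # conv2: kernel 3, no padding -> 29
    d = d // 2    # pool2: MaxPool3d(2)         -> 14
    return 4 * 4 * d ** 3 // 4   # batch(4) * channels(4) * 14^3, viewed as (-1, 4) -> 10976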
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1906624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 238328 % 2
x0 = xindex % 3844
x5 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x5), tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 390224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 24389 % 4
x0 = xindex % 24389
x4 = xindex // 24389
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 24416 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 87808
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 10976
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3, 3), (27, 27, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (4, 2, 3, 3, 3), (54, 27, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (10, 8), (8, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62, 62, 62), (476656, 238328, 3844,
62, 1))
buf1 = empty_strided_cuda((4, 2, 62, 62, 62), (480128, 240064, 3872,
62, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1906624)](buf0, primals_2,
buf1, 1906624, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2,
2], [2, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 29, 29, 29), (97556, 24389, 841, 29, 1)
)
buf6 = empty_strided_cuda((4, 4, 29, 29, 29), (97664, 24416, 841,
29, 1), torch.float32)
triton_poi_fused_convolution_relu_1[grid(390224)](buf5, primals_5,
buf6, 390224, XBLOCK=512, num_warps=8, num_stages=1)
del buf5
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2,
2], [2, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
buf10 = empty_strided_cuda((10976, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (10976, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_2[grid(87808)](buf11, primals_7, 87808,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((10976, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8,
(8, 10), (1, 8), 0), alpha=1, beta=1, out=buf12)
del primals_9
buf15 = empty_strided_cuda((10976, 10), (10, 1), torch.float32)
triton_per_fused__softmax_3[grid(10976)](buf12, buf15, 10976, 10,
XBLOCK=32, num_warps=4, num_stages=1)
del buf12
return (buf15, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6,
buf9, reinterpret_tensor(buf8, (10976, 4), (4, 1), 0), buf11, buf15,
primals_8, primals_6)
class BasicModel_ConvNet_MaxPool3dNew(nn.Module):
"""Same as above, but with the MaxPool1d replaced
with a MaxPool3d. This is useful because the MaxPool modules
    behave differently from other modules from the perspective
    of the DeepLift Attributions.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv3d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool3d(2)
self.conv2 = nn.Conv3d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool3d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Europium248/captum
|
BasicModel_ConvNet_MaxPool3d
| false
| 441
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
CrossEntropyLoss
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
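# For example, with reduction='mean' a loss of tensor([1.0, 3.0]) reduces to
# tensor(2.), with 'sum' to tensor(4.), and with 'none' it is returned unchanged.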
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
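# When avg_factor is given together with reduction='mean', the weighted loss is
# summed and divided by avg_factor instead of the number of elements; this is
# commonly used to average over, e.g., the number of positive samples rather
# than all predictions.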
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.
size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred, label, weight=None, reduction='mean',
avg_factor=None, multi_cls_pred=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(pred, label.float(), weight,
reduction='none')
loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
return loss
def cross_entropy(pred, label, multi_cls_pred=None, weight=None, reduction=
'mean', avg_factor=None):
"""
:param pred: [N, 81]
:param label: [N]
:param weight: [N]
:return:
"""
if multi_cls_pred is not None:
pred_all_cls = torch.softmax(multi_cls_pred, dim=-1)
max_pred_idx = list(torch.argmax(pred, dim=-1).view(-1).cpu().numpy())
loss = F.cross_entropy(pred, label, reduction='none')
if weight is not None:
weight = weight.float() + (0.0 if multi_cls_pred is None else 1.0 -
pred_all_cls[max_pred_idx])
elif multi_cls_pred is not None:
weight = 1.0 - pred_all_cls[max_pred_idx]
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
avg_factor=avg_factor)
return loss
def distribution_loss(pred, label, weight=None, reduction='mean',
avg_factor=None):
soft_pred = torch.softmax(pred, dim=-1)
max_ = torch.max(soft_pred, dim=-1).values.view(-1, 1)
soft_pred = soft_pred / (max_ + 1e-09)
max_pred = torch.max(soft_pred, dim=-1).values.view(-1, 1)
soft_pred = torch.where(soft_pred == max_pred, torch.zeros_like(
soft_pred), soft_pred)
alpha, beta, _gamma = 1.0, 0.0, 0.5
reg_pred = alpha * soft_pred + beta
dis_loss = 1.0 - torch.min((max_pred * 2.0 - soft_pred) * (1.0 - torch.
tanh(reg_pred)), dim=-1).values * 0.5
dis_loss = torch.mean(dis_loss)
loss = F.cross_entropy(pred, label, reduction='none')
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
avg_factor=avg_factor)
return loss + dis_loss
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=
None, multi_cls_pred=None):
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction
='mean')[None]
def multi_classes_loss(pred, label, weight=None, reduction='mean',
avg_factor=None):
pred = torch.softmax(pred.view(-1, 2), dim=-1)
labels = list(label.cpu().numpy())
pd = pred[:, 1]
ori = torch.zeros_like(pred[:, 1])
ori[labels] = 1
pos_cls_loss = torch.where(ori == 1, torch.tanh(1 - pd) + (pd < 0.5), ori)
pos_cls_loss = torch.max(pos_cls_loss)
neg_cls_loss = torch.where(ori == 0, torch.tanh(pd) + (pd > 0.5), torch
.zeros_like(ori))
neg_cls_loss = torch.max(neg_cls_loss)
return (pos_cls_loss + neg_cls_loss) * 0.5
class CrossEntropyLoss(nn.Module):
def __init__(self, use_sigmoid=False, use_mask=False, use_dis=False,
use_multi_cls=False, reduction='mean', loss_weight=1.0):
super(CrossEntropyLoss, self).__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.use_dis = use_dis
self.reduction = reduction
self.loss_weight = loss_weight
self.use_multi_cls = use_multi_cls
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
elif self.use_dis:
self.cls_criterion = distribution_loss
elif self.use_multi_cls:
self.cls_criterion = multi_classes_loss
else:
self.cls_criterion = cross_entropy
def forward(self, cls_score, label, weight=None, avg_factor=None,
reduction_override=None, multi_cls_pred=None, **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
weight, reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
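# The two kernels below implement the traced cross-entropy: the first subtracts the
# per-position maximum over the class dimension (the log-softmax stabilization step),
# and the second finishes the log-sum-exp, multiplies by the target tensor, sums over
# the class dimension, negates, and averages the 64 per-position losses.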
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = 64.0
tmp32 = tmp30 / tmp31
tmp33 = 1.0
tmp34 = tmp32 * tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2,
buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.
size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred, label, weight=None, reduction='mean',
avg_factor=None, multi_cls_pred=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(pred, label.float(), weight,
reduction='none')
loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
return loss
def cross_entropy(pred, label, multi_cls_pred=None, weight=None, reduction=
'mean', avg_factor=None):
"""
:param pred: [N, 81]
:param label: [N]
:param weight: [N]
:return:
"""
if multi_cls_pred is not None:
pred_all_cls = torch.softmax(multi_cls_pred, dim=-1)
max_pred_idx = list(torch.argmax(pred, dim=-1).view(-1).cpu().numpy())
loss = F.cross_entropy(pred, label, reduction='none')
if weight is not None:
weight = weight.float() + (0.0 if multi_cls_pred is None else 1.0 -
pred_all_cls[max_pred_idx])
elif multi_cls_pred is not None:
weight = 1.0 - pred_all_cls[max_pred_idx]
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
avg_factor=avg_factor)
return loss
def distribution_loss(pred, label, weight=None, reduction='mean',
avg_factor=None):
soft_pred = torch.softmax(pred, dim=-1)
max_ = torch.max(soft_pred, dim=-1).values.view(-1, 1)
soft_pred = soft_pred / (max_ + 1e-09)
max_pred = torch.max(soft_pred, dim=-1).values.view(-1, 1)
soft_pred = torch.where(soft_pred == max_pred, torch.zeros_like(
soft_pred), soft_pred)
alpha, beta, _gamma = 1.0, 0.0, 0.5
reg_pred = alpha * soft_pred + beta
dis_loss = 1.0 - torch.min((max_pred * 2.0 - soft_pred) * (1.0 - torch.
tanh(reg_pred)), dim=-1).values * 0.5
dis_loss = torch.mean(dis_loss)
loss = F.cross_entropy(pred, label, reduction='none')
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
avg_factor=avg_factor)
return loss + dis_loss
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=
None, multi_cls_pred=None):
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction
='mean')[None]
def multi_classes_loss(pred, label, weight=None, reduction='mean',
avg_factor=None):
pred = torch.softmax(pred.view(-1, 2), dim=-1)
labels = list(label.cpu().numpy())
pd = pred[:, 1]
ori = torch.zeros_like(pred[:, 1])
ori[labels] = 1
pos_cls_loss = torch.where(ori == 1, torch.tanh(1 - pd) + (pd < 0.5), ori)
pos_cls_loss = torch.max(pos_cls_loss)
neg_cls_loss = torch.where(ori == 0, torch.tanh(pd) + (pd > 0.5), torch
.zeros_like(ori))
neg_cls_loss = torch.max(neg_cls_loss)
return (pos_cls_loss + neg_cls_loss) * 0.5
class CrossEntropyLossNew(nn.Module):
def __init__(self, use_sigmoid=False, use_mask=False, use_dis=False,
use_multi_cls=False, reduction='mean', loss_weight=1.0):
super(CrossEntropyLossNew, self).__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.use_dis = use_dis
self.reduction = reduction
self.loss_weight = loss_weight
self.use_multi_cls = use_multi_cls
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
elif self.use_dis:
self.cls_criterion = distribution_loss
elif self.use_multi_cls:
self.cls_criterion = multi_classes_loss
else:
self.cls_criterion = cross_entropy
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Fanzhongjie/ARFE
|
CrossEntropyLoss
| false
| 442
|
[
"Apache-2.0"
] | 0
|
4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
h_sigmoid
|
import torch
import torch.nn as nn
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
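# The kernel below evaluates the hard sigmoid elementwise: relu6(x + 3) / 6,
# expressed as clamp(x + 3, 0, 6) * (1 / 6).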
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoidNew(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoidNew, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Felicia980317/mytorch
|
h_sigmoid
| false
| 443
|
[
"Apache-2.0"
] | 0
|
e463122c0d402878ec5b4c5a823a0feeba8fdbfe
|
https://github.com/Felicia980317/mytorch/tree/e463122c0d402878ec5b4c5a823a0feeba8fdbfe
|
MedianPool2d
|
import torch
from torch.nn.modules.utils import _pair
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
from torch.nn.modules.utils import _quadruple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1],
self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
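# Minimal usage sketch (illustrative values): MedianPool2d(kernel_size=3, stride=1,
# same=True) acts as a 3x3 median filter that preserves the spatial size of its
# input, while the default same=False with padding=0 shrinks each spatial dimension
# by kernel_size - 1.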
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.utils import _pair
import torch.nn as nn
import torch.nn.parallel
from torch.nn.modules.utils import _quadruple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
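# The clone kernel below gathers the 3x3 neighbourhood of every output location
# into the last axis of an unfolded view; the abs-based index arithmetic comes from
# the traced F.pad(..., mode='reflect'), which is a no-op here because the default
# padding is 0. torch.median then reduces over that last axis.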
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex % 3
x4 = xindex // 3
y0 = yindex % 2
y1 = yindex // 2 % 2
y2 = yindex // 4
x6 = xindex
y5 = yindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x3 + y0) + -4 *
tl_math.abs(-3 + x4 + y1) + 16 * y2), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x6 + 9 * y5), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2, 3, 3), (144, 36, 18, 9, 3, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 9)](arg0_1, buf0, 64, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del arg0_1
buf1 = torch.ops.aten.median.dim(reinterpret_tensor(buf0, (4, 4, 2,
2, 9), (144, 36, 18, 9, 1), 0), -1)
del buf0
buf2 = buf1[0]
del buf1
return buf2,
class MedianPool2dNew(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2dNew, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Fanzhongjie/ARFE
|
MedianPool2d
| false
| 444
|
[
"Apache-2.0"
] | 0
|
4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
BasicModel_ConvNet_One_Conv
|
import torch
from torch import Tensor
import torch.nn as nn
from typing import Optional
from typing import no_type_check
class BasicModel_ConvNet_One_Conv(nn.Module):
def __init__(self, inplace: 'bool'=False):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(8, 4)
self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3))
self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0]))
self.fc1.weight = nn.Parameter(torch.cat([torch.ones(4, 5), -1 *
torch.ones(4, 3)], dim=1))
self.fc1.bias = nn.Parameter(torch.zeros(4))
self.relu2 = nn.ReLU(inplace=inplace)
@no_type_check
def forward(self, x: 'Tensor', x2: 'Optional[Tensor]'=None):
if x2 is not None:
x = x + x2
x = self.relu1(self.conv1(x))
x = x.view(-1, 8)
return self.relu2(self.fc1(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 30752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3844 % 2
x0 = xindex % 3844
x3 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 3872 * x3), tmp4, xmask)
tl.store(out_ptr1 + (x0 + 3968 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_view_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 30752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3872 * (x0 // 3844) + x0 % 3844), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 15376
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 2, 62, 62), (7936, 3968, 62, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(30752)](
buf0, primals_2, buf1, buf6, 30752, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
buf2 = reinterpret_tensor(buf0, (3844, 8), (8, 1), 0)
del buf0
triton_poi_fused_convolution_relu_view_1[grid(30752)](buf1, buf2,
30752, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((3844, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (8, 4), (1, 8
), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((3844, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(15376)](buf4,
primals_5, buf5, 15376, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_3, buf2, buf5, primals_4, buf6
class BasicModel_ConvNet_One_ConvNew(nn.Module):
def __init__(self, inplace: 'bool'=False):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(8, 4)
self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3))
self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0]))
self.fc1.weight = nn.Parameter(torch.cat([torch.ones(4, 5), -1 *
torch.ones(4, 3)], dim=1))
self.fc1.bias = nn.Parameter(torch.zeros(4))
self.relu2 = nn.ReLU(inplace=inplace)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Europium248/captum
|
BasicModel_ConvNet_One_Conv
| false
| 445
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
GCN1
|
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class sparse_dropout(Module):
"""
Sparse dropout implementation
"""
def __init__(self):
super(sparse_dropout, self).__init__()
def forward(self, input, p=0.5):
if self.training is True and p > 0.0:
random = torch.rand_like(input._values())
mask = random.ge(p)
new_indices = torch.masked_select(input._indices(), mask).reshape(
2, -1)
new_values = torch.masked_select(input._values(), mask)
output = torch.sparse.FloatTensor(new_indices, new_values,
input.shape)
return output
else:
return input
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, node_dropout=0.0,
edge_dropout=0.0, bias=True):
super(GraphConvolution, self).__init__()
self.sparse_dropout = sparse_dropout()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.zeros(in_features, out_features))
stdv = 1.0 / math.sqrt(self.weight.size(1))
nn.init.uniform_(self.weight, -stdv, stdv)
if bias:
self.bias = Parameter(torch.zeros(1, out_features))
nn.init.uniform_(self.bias, -stdv, stdv)
else:
self.register_parameter('bias', None)
self.node_dropout = node_dropout
self.edge_dropout = edge_dropout
def forward(self, input, adj):
adj = self.sparse_dropout(adj, self.edge_dropout)
support = torch.mm(input, self.weight)
support = F.dropout(support, self.node_dropout, training=self.training)
output = torch.sparse.mm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
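# In matrix form the layer above computes output = adj @ (input @ weight) + bias,
# with optional sparse dropout on the adjacency edges and standard dropout on the
# intermediate support matrix.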
class GCN1(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN1, self).__init__()
self.gc1 = GraphConvolution(nfeat, nclass)
self.dropout = dropout
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc1(x, adj)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
(4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
del buf0
del buf1
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4), (1, 4), 0)
del buf3
triton_poi_fused_add_1[grid(16)](buf4, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
return buf4, primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class sparse_dropout(Module):
"""
Sparse dropout implementation
"""
def __init__(self):
super(sparse_dropout, self).__init__()
def forward(self, input, p=0.5):
if self.training is True and p > 0.0:
random = torch.rand_like(input._values())
mask = random.ge(p)
new_indices = torch.masked_select(input._indices(), mask).reshape(
2, -1)
new_values = torch.masked_select(input._values(), mask)
output = torch.sparse.FloatTensor(new_indices, new_values,
input.shape)
return output
else:
return input
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, node_dropout=0.0,
edge_dropout=0.0, bias=True):
super(GraphConvolution, self).__init__()
self.sparse_dropout = sparse_dropout()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.zeros(in_features, out_features))
stdv = 1.0 / math.sqrt(self.weight.size(1))
nn.init.uniform_(self.weight, -stdv, stdv)
if bias:
self.bias = Parameter(torch.zeros(1, out_features))
nn.init.uniform_(self.bias, -stdv, stdv)
else:
self.register_parameter('bias', None)
self.node_dropout = node_dropout
self.edge_dropout = edge_dropout
def forward(self, input, adj):
adj = self.sparse_dropout(adj, self.edge_dropout)
support = torch.mm(input, self.weight)
support = F.dropout(support, self.node_dropout, training=self.training)
output = torch.sparse.mm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN1New(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN1New, self).__init__()
self.gc1 = GraphConvolution(nfeat, nclass)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Eudialyte/SepGAT
|
GCN1
| false
| 446
|
[
"MIT"
] | 0
|
6ea77714d1b2f2f5d0857cddcc9f1f5f9c0bcf50
|
https://github.com/Eudialyte/SepGAT/tree/6ea77714d1b2f2f5d0857cddcc9f1f5f9c0bcf50
|
Tanh
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Tanh(nn.Module):
def __init__(self, inplace: 'bool'=False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TanhNew(nn.Module):
def __init__(self, inplace: 'bool'=False):
super(TanhNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Fanzhongjie/ARFE
|
Tanh
| false
| 448
|
[
"Apache-2.0"
] | 0
|
4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
SoftmaxDeepLiftModel
|
import torch
import torch.nn as nn
class SoftmaxDeepLiftModel(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out):
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_hidden, num_hidden))
self.lin3.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
lin1 = self.relu1(self.lin1(input))
lin2 = self.relu2(self.lin2(lin1))
lin3 = self.lin3(lin2)
return self.softmax(lin3)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_hidden': 4, 'num_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class SoftmaxDeepLiftModelNew(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out):
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_hidden, num_hidden))
self.lin3.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_2 = self.lin1.bias
primals_4 = self.lin2.weight
primals_5 = self.lin2.bias
primals_6 = self.lin3.weight
primals_7 = self.lin3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Europium248/captum
|
SoftmaxDeepLiftModel
| false
| 449
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
SEModule
|
import torch
import torch.nn as nn
import torch.nn.parallel
class SEModule(nn.Module):
def __init__(self, channels, reduction=16, act_layer=nn.ReLU):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
reduction_channels = max(channels // reduction, 8)
self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
padding=0, bias=True)
self.act = act_layer(inplace=True)
self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
padding=0, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.fc1(x_se)
x_se = self.act(x_se)
x_se = self.fc2(x_se)
return x * x_se.sigmoid()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
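# The kernels below implement the squeeze-and-excitation path: a per-channel global
# average pool, bias plus ReLU after the first 1x1 convolution, bias addition after
# the second, and finally x * sigmoid(attention) broadcast over the spatial positions.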
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5
class SEModuleNew(nn.Module):
def __init__(self, channels, reduction=16, act_layer=nn.ReLU):
super(SEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
reduction_channels = max(channels // reduction, 8)
self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
padding=0, bias=True)
self.act = act_layer(inplace=True)
self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
padding=0, bias=True)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Fanzhongjie/ARFE
|
SEModule
| false
| 450
|
[
"Apache-2.0"
] | 0
|
4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
MockModule
|
import torch
import torch.nn as nn
import torch.nn
import torch.optim
class MockModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 16, kernel_size=1)
def forward(self, x: 'torch.Tensor'):
return self.conv(x).mean(3).mean(2)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_convolution_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x4 = xindex
x1 = xindex // 64 % 16
tmp0 = tl.load(in_ptr0 + (r3 + 64 * x4), None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x4, tmp5, None)
@triton.jit
def triton_per_fused_convolution_mean_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp6 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 16, 64), (1024, 64, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_convolution_mean_0[grid(4096)](buf0, primals_2,
buf1, 4096, 64, XBLOCK=32, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_convolution_mean_1[grid(64)](buf3, buf1, 64, 64,
XBLOCK=8, num_warps=4, num_stages=1)
del buf1
return buf3, primals_1, primals_3
class MockModuleNew(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 16, kernel_size=1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
FebruaryBreeze/torch-parameter-groups
|
MockModule
| false
| 451
|
[
"MIT"
] | 0
|
e90c3d451c1afcfe5267801d5cfcc5413777b1d8
|
https://github.com/FebruaryBreeze/torch-parameter-groups/tree/e90c3d451c1afcfe5267801d5cfcc5413777b1d8
|
ShuffleBlock
|
import torch
import torch.nn as nn
class ShuffleBlock(nn.Module):
def __init__(self, groups=2):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
"""Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]"""
N, C, H, W = x.size()
g = self.groups
return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).reshape(N,
C, H, W)
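# Example of the shuffle for C=4, groups=2: channel order [0, 1, 2, 3] becomes
# [0, 2, 1, 3], i.e. channels from the two groups are interleaved.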
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
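# The clone kernel below materializes the channel shuffle: the read index swaps the
# group index (x1) and the channel-within-group index (x2), so the contiguous output
# corresponds to the permuted (N, C/g, g, H, W) view.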
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 2
x2 = xindex // 32 % 2
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 32 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 2, 4, 4), (64, 32, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class ShuffleBlockNew(nn.Module):
def __init__(self, groups=2):
super(ShuffleBlockNew, self).__init__()
self.groups = groups
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Fede112/pytorch-cifar
|
ShuffleBlock
| false
| 452
|
[
"MIT"
] | 0
|
2e89028e9aaabb22dd944f0002d29548059c24df
|
https://github.com/Fede112/pytorch-cifar/tree/2e89028e9aaabb22dd944f0002d29548059c24df
|
SplitAttnConv2d
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
class RadixSoftmax(nn.Module):
def __init__(self, radix, cardinality):
super(RadixSoftmax, self).__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
class SplitAttnConv2d(nn.Module):
"""Split-Attention Conv2d
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, radix=2,
reduction_factor=4, act_layer=nn.ReLU, norm_layer=None, drop_block=
None, **kwargs):
super(SplitAttnConv2d, self).__init__()
self.radix = radix
self.drop_block = drop_block
mid_chs = out_channels * radix
attn_chs = max(in_channels * radix // reduction_factor, 32)
self.conv = nn.Conv2d(in_channels, mid_chs, kernel_size, stride,
padding, dilation, groups=groups * radix, bias=bias, **kwargs)
self.bn0 = norm_layer(mid_chs) if norm_layer is not None else None
self.act0 = act_layer(inplace=True)
self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups)
self.bn1 = norm_layer(attn_chs) if norm_layer is not None else None
self.act1 = act_layer(inplace=True)
self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups)
self.rsoftmax = RadixSoftmax(radix, groups)
def forward(self, x):
x = self.conv(x)
if self.bn0 is not None:
x = self.bn0(x)
if self.drop_block is not None:
x = self.drop_block(x)
x = self.act0(x)
B, RC, H, W = x.shape
if self.radix > 1:
x = x.reshape((B, self.radix, RC // self.radix, H, W))
x_gap = x.sum(dim=1)
else:
x_gap = x
x_gap = F.adaptive_avg_pool2d(x_gap, 1)
x_gap = self.fc1(x_gap)
if self.bn1 is not None:
x_gap = self.bn1(x_gap)
x_gap = self.act1(x_gap)
x_attn = self.fc2(x_gap)
x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1)
if self.radix > 1:
out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))
).sum(dim=1)
else:
out = x * x_attn
return out.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
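# Editor's note (not part of the original repository): an illustrative shape check
# under the same constructor arguments as get_init_inputs above.
def _split_attn_shape_check():
    layer = SplitAttnConv2d(in_channels=4, out_channels=4, kernel_size=4)
    out = layer(torch.rand(4, 4, 4, 4))
    # A 4x4 kernel with no padding collapses the 4x4 input to 1x1, and the radix
    # branches are attention-weighted and summed back down to out_channels.
    assert out.shape == (4, 4, 1, 1)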
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp6 = 1.0
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp3 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp4 - tmp5
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = tmp2 * tmp11
tmp14 = triton_helpers.maximum(tmp1, tmp13)
tmp15 = tmp9 / tmp10
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (32, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (8, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_6, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sum_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 1, 1), (32, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(128)](buf3, primals_4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 8, 1, 1), (8, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(32)](buf5, primals_6, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(16)](buf0, buf5, buf6, 16, XBLOCK=
16, num_warps=1, num_stages=1)
return (buf6, primals_1, primals_2, primals_3, primals_5, buf0, buf1,
buf3, buf5)
class RadixSoftmax(nn.Module):
def __init__(self, radix, cardinality):
super(RadixSoftmax, self).__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
class SplitAttnConv2dNew(nn.Module):
"""Split-Attention Conv2d
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, radix=2,
reduction_factor=4, act_layer=nn.ReLU, norm_layer=None, drop_block=
None, **kwargs):
super(SplitAttnConv2dNew, self).__init__()
self.radix = radix
self.drop_block = drop_block
mid_chs = out_channels * radix
attn_chs = max(in_channels * radix // reduction_factor, 32)
self.conv = nn.Conv2d(in_channels, mid_chs, kernel_size, stride,
padding, dilation, groups=groups * radix, bias=bias, **kwargs)
self.bn0 = norm_layer(mid_chs) if norm_layer is not None else None
self.act0 = act_layer(inplace=True)
self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups)
self.bn1 = norm_layer(attn_chs) if norm_layer is not None else None
self.act1 = act_layer(inplace=True)
self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups)
self.rsoftmax = RadixSoftmax(radix, groups)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Fanzhongjie/ARFE
|
SplitAttnConv2d
| false
| 453
|
[
"Apache-2.0"
] | 0
|
4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
https://github.com/Fanzhongjie/ARFE/tree/4b96b8c5bc0895d3d30acec2a490f81a860fe860
|
CNN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, input_size=50, hidden_size=256, dropout=0,
kernel_size=3, padding=1, activation_function=F.relu):
"""
Args:
            input_size: dimension of input embedding
kernel_size: kernel_size for CNN
padding: padding for CNN
hidden_size: hidden size
"""
super().__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size, padding
=padding)
self.act = activation_function
self.dropout = nn.Dropout(dropout)
def forward(self, x):
"""
Args:
input features: (B, L, I_EMBED)
Return:
output features: (B, H_EMBED)
"""
x = x.transpose(1, 2)
x = self.conv(x)
x = self.act(x)
x = self.dropout(x)
x = x.transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 50, 50])]
def get_init_inputs():
return [[], {}]
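# Editor's note (not part of the original repository): an illustrative shape check
# mirroring the defaults above (input_size=50, hidden_size=256, kernel_size=3, padding=1).
def _cnn_shape_check():
    cnn = CNN()
    out = cnn(torch.rand(4, 50, 50))      # (B, L, input_size)
    # kernel_size=3 with padding=1 preserves the sequence length L.
    assert out.shape == (4, 50, 256)      # (B, L, hidden_size)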
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 200
xnumel = 50
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 50
y1 = yindex // 50
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 50 * x2 + 2500 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 50 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 50 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 50, 50), (2500, 50, 1))
assert_size_stride(primals_2, (256, 50, 3), (150, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 50, 50), (2500, 50, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(200, 50)](primals_1, buf0, 200,
50, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 50), (12800, 50, 1))
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 256, 50), (12800, 50, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(51200)](
buf2, primals_3, buf3, 51200, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_3
return reinterpret_tensor(buf2, (4, 50, 256), (12800, 1, 50), 0
), primals_2, reinterpret_tensor(primals_1, (4, 50, 50), (2500, 1,
50), 0), buf3
class CNNNew(nn.Module):
def __init__(self, input_size=50, hidden_size=256, dropout=0,
kernel_size=3, padding=1, activation_function=F.relu):
"""
Args:
            input_size: dimension of input embedding
kernel_size: kernel_size for CNN
padding: padding for CNN
hidden_size: hidden size
"""
super().__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size, padding
=padding)
self.act = activation_function
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Fatead/NER
|
CNN
| false
| 454
|
[
"MIT"
] | 0
|
142fabb2fcb1b730042da7acfde10199c62537d5
|
https://github.com/Fatead/NER/tree/142fabb2fcb1b730042da7acfde10199c62537d5
|
BasicModel_ConvNet
|
import torch
from torch import Tensor
import torch.nn as nn
from typing import no_type_check
class BasicModel_ConvNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(2, 4, 3, 1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
@no_type_check
def forward(self, x: 'Tensor') ->Tensor:
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
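# Editor's note (not part of the original repository): an illustrative sketch of the
# output shape; x.view(-1, 4) folds the 4x14x14 feature map into the batch axis,
# giving 4 * 4 * 14 * 14 / 4 = 784 rows.
def _convnet_shape_check():
    model = BasicModel_ConvNet()
    out = model(torch.rand(4, 1, 64, 64))
    assert out.shape == (784, 10)
    # Softmax over dim=1 makes every row a probability distribution.
    assert torch.allclose(out.sum(dim=1), torch.ones(784))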
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 30752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 2
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 7688
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x4 = xindex // 961
x3 = xindex // 1922
x5 = xindex % 1922
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 1952 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 2048 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 13456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 4
x2 = xindex // 3364
x4 = xindex % 3364
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 3392 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196 % 4
x3 = xindex // 784
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3
), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 6272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (4, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (10, 8), (8, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(30752)](buf0, primals_2,
buf1, 30752, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 2, 31, 31), (1952, 961, 31, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 2, 31, 31), (2048, 961, 31, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(7688)](buf1, buf2,
buf3, 7688, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 29, 29), (3364, 841, 29, 1))
buf5 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch
.float32)
triton_poi_fused_convolution_relu_2[grid(13456)](buf4, primals_5,
buf5, 13456, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.int8
)
buf7 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(3136)](buf5, buf6,
buf7, 3136, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((784, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (784, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(6272)](buf9, primals_7, 6272, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
triton_per_fused__softmax_5[grid(784)](buf10, buf13, 784, 10,
XBLOCK=8, num_warps=2, num_stages=1)
del buf10
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (784, 4), (4, 1), 0), buf9, buf13,
primals_8, primals_6)
class BasicModel_ConvNetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(2, 4, 3, 1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Europium248/captum
|
BasicModel_ConvNet
| false
| 455
|
[
"BSD-3-Clause"
] | 0
|
ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|
https://github.com/Europium248/captum/tree/ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc
|